drivers/infiniband/hw/hfi1/pio_copy.c
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"

/* additive distance between non-SOP and SOP space */
#define SOP_DISTANCE (TXE_PIO_SIZE / 2)
#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1)
/* number of QUADWORDs in a block */
#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64))
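
/*
 * Layout note (not stated explicitly in this file, but implied by the
 * copy routines below): SOP_DISTANCE converts a send buffer address into
 * its SOP=1 alias in the upper half of the TXE PIO mapping.  The first
 * block of every packet is written through that alias
 * (pbuf->start + SOP_DISTANCE); all later blocks are written through the
 * normal address.
 */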

/**
 * pio_copy - copy data block to MMIO space
 * @dd: hfi1 device data
 * @pbuf: a number of blocks allocated within a PIO send context
 * @pbc: PBC to send
 * @from: source, must be 8 byte aligned
 * @count: number of DWORD (32-bit) quantities to copy from source
 *
 * Copy data from source to PIO Send Buffer memory, 8 bytes at a time.
 * Must always write full PIO_BLOCK_SIZE byte blocks. The first block must
 * be written to the corresponding SOP=1 address.
 *
 * Known:
 * o pbuf->start always starts on a block boundary
 * o pbuf can wrap only at a block boundary
 */
void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
	      const void *from, size_t count)
{
	void __iomem *dest = pbuf->start + SOP_DISTANCE;
	void __iomem *send = dest + PIO_BLOCK_SIZE;
	void __iomem *dend; /* 8-byte data end */

	/* write the PBC */
	writeq(pbc, dest);
	dest += sizeof(u64);

	/* calculate where the QWORD data ends - in SOP=1 space */
	dend = dest + ((count >> 1) * sizeof(u64));

	if (dend < send) {
		/*
		 * all QWORD data is within the SOP block, does *not*
		 * reach the end of the SOP block
		 */

		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/*
		 * No boundary checks are needed here:
		 * 0. We're not on the SOP block boundary
		 * 1. The possible DWORD dangle will still be within
		 *    the SOP block
		 * 2. We cannot wrap except on a block boundary.
		 */
	} else {
		/* QWORD data extends _to_ or beyond the SOP block */

		/* write 8-byte SOP chunk data */
		while (dest < send) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/* drop out of the SOP range */
		dest -= SOP_DISTANCE;
		dend -= SOP_DISTANCE;

		/*
		 * If the wrap comes before or matches the data end,
		 * copy until the wrap, then wrap.
		 *
		 * If the data ends at the end of the SOP above and
		 * the buffer wraps, then pbuf->end == dend == dest
		 * and nothing will get written, but we will wrap in
		 * case there is a dangling DWORD.
		 */
		if (pbuf->end <= dend) {
			while (dest < pbuf->end) {
				writeq(*(u64 *)from, dest);
				from += sizeof(u64);
				dest += sizeof(u64);
			}

			dest -= pbuf->sc->size;
			dend -= pbuf->sc->size;
		}

		/* write 8-byte non-SOP, non-wrap chunk data */
		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
	}
	/* at this point we have wrapped if we are going to wrap */

	/* write dangling u32, if any */
	if (count & 1) {
		union mix val;

		val.val64 = 0;
		val.val32[0] = *(u32 *)from;
		writeq(val.val64, dest);
		dest += sizeof(u64);
	}
	/*
	 * fill in rest of block, no need to check pbuf->end
	 * as we only wrap on a block boundary
	 */
	while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
		writeq(0, dest);
		dest += sizeof(u64);
	}

	/* finished with this buffer */
	this_cpu_dec(*pbuf->sc->buffers_allocated);
	preempt_enable();
}
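
/*
 * Illustrative example (not a real caller): sending a PBC plus 7 dwords
 * of 8-byte-aligned header data, with a pio_buf already allocated from a
 * send context:
 *
 *	pio_copy(dd, pbuf, pbc, hdr, 7);
 *
 * The PBC fills the first qword of the SOP block, the 7 dwords follow as
 * three qwords plus one dangling dword, and the zero-fill loop above pads
 * the rest of the PIO block.
 */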

/*
 * Handle carry bytes using shifts and masks.
 *
 * NOTE: the unused portion of carry is expected to always be zero.
 */

/*
 * "zero" shift - bit shift used to zero out upper bytes. Input is
 * the count of LSB bytes to preserve.
 */
#define zshift(x) (8 * (8 - (x)))

/*
 * "merge" shift - bit shift used to merge with carry bytes. Input is
 * the LSB byte count to move beyond.
 */
#define mshift(x) (8 * (x))
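
/*
 * Worked example of the two shifts: with carry_bytes == 3, mshift(3) == 24
 * and zshift(3) == 40, so merge_write8() below computes
 *
 *	temp  = carry | (new << 24);	(low 5 bytes of "new" land above
 *					 the 3 carry bytes)
 *	carry = new >> 40;		(top 3 bytes of "new" become the
 *					 next carry, upper bytes zero)
 */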

/*
 * Jump copy - no-loop copy for < 8 bytes.
 */
static inline void jcopy(u8 *dest, const u8 *src, u32 n)
{
	switch (n) {
	case 7:
		*dest++ = *src++;
		/* fall through */
	case 6:
		*dest++ = *src++;
		/* fall through */
	case 5:
		*dest++ = *src++;
		/* fall through */
	case 4:
		*dest++ = *src++;
		/* fall through */
	case 3:
		*dest++ = *src++;
		/* fall through */
	case 2:
		*dest++ = *src++;
		/* fall through */
	case 1:
		*dest++ = *src++;
		/* fall through */
	}
}

/*
 * Read nbytes from "from" and place them in the low bytes
 * of pbuf->carry. Other bytes are left as-is. Any previous
 * value in pbuf->carry is lost.
 *
 * NOTES:
 * o do not read from "from" if nbytes is zero
 * o from may _not_ be u64 aligned.
 */
static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
				  unsigned int nbytes)
{
	pbuf->carry.val64 = 0;
	jcopy(&pbuf->carry.val8[0], from, nbytes);
	pbuf->carry_bytes = nbytes;
}

/*
 * Read nbytes bytes from "from" and put them at the end of pbuf->carry.
 * It is expected that the extra read does not overfill carry.
 *
 * NOTES:
 * o from may _not_ be u64 aligned
 * o nbytes may span a QW boundary
 */
static inline void read_extra_bytes(struct pio_buf *pbuf,
				    const void *from, unsigned int nbytes)
{
	jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
	pbuf->carry_bytes += nbytes;
}

/*
 * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
 * Put the unused part of the next 8 bytes of src into the LSB bytes of
 * pbuf->carry with the upper bytes zeroed.
 *
 * NOTES:
 * o result must keep unused bytes zeroed
 * o src must be u64 aligned
 */
static inline void merge_write8(
	struct pio_buf *pbuf,
	void __iomem *dest,
	const void *src)
{
	u64 new, temp;

	new = *(u64 *)src;
	temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
	writeq(temp, dest);
	pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
}
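
/*
 * Byte-lane view of merge_write8() (value notation, most significant byte
 * on the left), again with carry_bytes == 3:
 *
 *	carry:		00 00 00 00 00 c2 c1 c0
 *	src qword:	s7 s6 s5 s4 s3 s2 s1 s0
 *
 *	qword written:	s4 s3 s2 s1 s0 c2 c1 c0
 *	new carry:	00 00 00 00 00 s7 s6 s5
 */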

/*
 * Write a quad word using all bytes of carry.
 */
static inline void carry8_write8(union mix carry, void __iomem *dest)
{
	writeq(carry.val64, dest);
}

/*
 * Write a quad word using all the valid bytes of carry. If carry
 * has zero valid bytes, nothing is written.
 * Returns 0 on nothing written, non-zero on quad word written.
 */
static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
{
	if (pbuf->carry_bytes) {
		/* unused bytes are always kept zeroed, so just write */
		writeq(pbuf->carry.val64, dest);
		return 1;
	}

	return 0;
}

/*
 * Segmented PIO Copy - start
 *
 * Start a PIO copy.
 *
 * @pbuf: destination buffer
 * @pbc: the PBC for the PIO buffer
 * @from: data source, QWORD aligned
 * @nbytes: bytes to copy
 */
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
			const void *from, size_t nbytes)
{
	void __iomem *dest = pbuf->start + SOP_DISTANCE;
	void __iomem *send = dest + PIO_BLOCK_SIZE;
	void __iomem *dend; /* 8-byte data end */

	writeq(pbc, dest);
	dest += sizeof(u64);

	/* calculate where the QWORD data ends - in SOP=1 space */
	dend = dest + ((nbytes >> 3) * sizeof(u64));

	if (dend < send) {
		/*
		 * all QWORD data is within the SOP block, does *not*
		 * reach the end of the SOP block
		 */

		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/*
		 * No boundary checks are needed here:
		 * 0. We're not on the SOP block boundary
		 * 1. The possible DWORD dangle will still be within
		 *    the SOP block
		 * 2. We cannot wrap except on a block boundary.
		 */
	} else {
		/* QWORD data extends _to_ or beyond the SOP block */

		/* write 8-byte SOP chunk data */
		while (dest < send) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
		/* drop out of the SOP range */
		dest -= SOP_DISTANCE;
		dend -= SOP_DISTANCE;

		/*
		 * If the wrap comes before or matches the data end,
		 * copy until the wrap, then wrap.
		 *
		 * If the data ends at the end of the SOP above and
		 * the buffer wraps, then pbuf->end == dend == dest
		 * and nothing will get written, but we will wrap in
		 * case there is a dangling DWORD.
		 */
		if (pbuf->end <= dend) {
			while (dest < pbuf->end) {
				writeq(*(u64 *)from, dest);
				from += sizeof(u64);
				dest += sizeof(u64);
			}

			dest -= pbuf->sc->size;
			dend -= pbuf->sc->size;
		}

		/* write 8-byte non-SOP, non-wrap chunk data */
		while (dest < dend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}
	}
	/* at this point we have wrapped if we are going to wrap */

	/* ...but it doesn't matter as we're done writing */

	/* save dangling bytes, if any */
	read_low_bytes(pbuf, from, nbytes & 0x7);

	pbuf->qw_written = 1 /*PBC*/ + (nbytes >> 3);
}
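
/*
 * Typical call sequence for the segmented interface (a sketch; the
 * caller's buffer allocation and length bookkeeping are not shown in this
 * file, and names such as hdr and frag are placeholders):
 *
 *	seg_pio_copy_start(pbuf, pbc, hdr, hdr_bytes);
 *	...one seg_pio_copy_mid(pbuf, frag, frag_bytes) per fragment...
 *	seg_pio_copy_end(pbuf);
 *
 * start requires a QWORD-aligned source, mid accepts any alignment, and
 * end flushes the carry and zero-pads the final block.
 */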

/*
 * Mid copy helper, "mixed case" - source is 64-bit aligned but carry
 * bytes are non-zero.
 *
 * Whole u64s must be written to the chip, so bytes must be manually merged.
 *
 * @pbuf: destination buffer
 * @from: data source, QWORD aligned
 * @nbytes: bytes to copy
 *
 * Must handle nbytes < 8.
 */
static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
{
	void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
	void __iomem *dend; /* 8-byte data end */
	unsigned long qw_to_write = nbytes >> 3;
	unsigned long bytes_left = nbytes & 0x7;

	/* calculate 8-byte data end */
	dend = dest + (qw_to_write * sizeof(u64));

	if (pbuf->qw_written < PIO_BLOCK_QWS) {
		/*
		 * Still within SOP block. We don't need to check for
		 * wrap because we are still in the first block and
		 * can only wrap on block boundaries.
		 */
		void __iomem *send; /* SOP end */
		void __iomem *xend;

		/*
		 * calculate the end of data or end of block, whichever
		 * comes first
		 */
		send = pbuf->start + PIO_BLOCK_SIZE;
		xend = min(send, dend);

		/* shift up to SOP=1 space */
		dest += SOP_DISTANCE;
		xend += SOP_DISTANCE;

		/* write 8-byte chunk data */
		while (dest < xend) {
			merge_write8(pbuf, dest, from);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		/* shift down to SOP=0 space */
		dest -= SOP_DISTANCE;
	}
	/*
	 * At this point dest could be (either, both, or neither):
	 * - at dend
	 * - at the wrap
	 */

	/*
	 * If the wrap comes before or matches the data end,
	 * copy until the wrap, then wrap.
	 *
	 * If dest is at the wrap, we will fall into the if below,
	 * the loop will not execute, and we will wrap.
	 *
	 * If the data ends at the end of the SOP above and
	 * the buffer wraps, then pbuf->end == dend == dest
	 * and nothing will get written.
	 */
	if (pbuf->end <= dend) {
		while (dest < pbuf->end) {
			merge_write8(pbuf, dest, from);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		dest -= pbuf->sc->size;
		dend -= pbuf->sc->size;
	}

	/* write 8-byte non-SOP, non-wrap chunk data */
	while (dest < dend) {
		merge_write8(pbuf, dest, from);
		from += sizeof(u64);
		dest += sizeof(u64);
	}

	pbuf->qw_written += qw_to_write;

	/* handle carry and left-over bytes */
	if (pbuf->carry_bytes + bytes_left >= 8) {
		unsigned long nread;

		/* there is enough to fill another qw - fill carry */
		nread = 8 - pbuf->carry_bytes;
		read_extra_bytes(pbuf, from, nread);

		/*
		 * One more write - but need to make sure dest is correct.
		 * Check for wrap and the possibility the write
		 * should be in SOP space.
		 *
		 * The two checks immediately below cannot both be true, hence
		 * the else. If we have wrapped, we cannot still be within the
		 * first block. Conversely, if we are still in the first block,
		 * we cannot have wrapped. We do the wrap check first as that
		 * is more likely.
		 */
		/* adjust if we have wrapped */
		if (dest >= pbuf->end)
			dest -= pbuf->sc->size;
		/* jump to the SOP range if within the first block */
		else if (pbuf->qw_written < PIO_BLOCK_QWS)
			dest += SOP_DISTANCE;

		/* flush out full carry */
		carry8_write8(pbuf->carry, dest);
		pbuf->qw_written++;

		/* now adjust and read the rest of the bytes into carry */
		bytes_left -= nread;
		from += nread; /* from is now not aligned */
		read_low_bytes(pbuf, from, bytes_left);
	} else {
		/* not enough to fill another qw, append the rest to carry */
		read_extra_bytes(pbuf, from, bytes_left);
	}
}
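
/*
 * Example walk-through of the carry bookkeeping above (numbers are
 * illustrative): entering with carry_bytes == 5 and nbytes == 11 gives
 * qw_to_write == 1 and bytes_left == 3. One merged qword is written by
 * the loops above (carry_bytes stays 5), then 5 + 3 >= 8, so nread == 3
 * bytes are appended to fill the carry, the full carry is flushed as one
 * more qword, and read_low_bytes() leaves carry_bytes == 0.
 */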

/*
 * Mid copy helper, "straight case" - source pointer is 64-bit aligned
 * with no carry bytes.
 *
 * @pbuf: destination buffer
 * @from: data source, QWORD aligned
 * @nbytes: bytes to copy
 *
 * Must handle nbytes < 8.
 */
static void mid_copy_straight(struct pio_buf *pbuf,
			      const void *from, size_t nbytes)
{
	void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
	void __iomem *dend; /* 8-byte data end */

	/* calculate 8-byte data end */
	dend = dest + ((nbytes >> 3) * sizeof(u64));

	if (pbuf->qw_written < PIO_BLOCK_QWS) {
		/*
		 * Still within SOP block. We don't need to check for
		 * wrap because we are still in the first block and
		 * can only wrap on block boundaries.
		 */
		void __iomem *send; /* SOP end */
		void __iomem *xend;

		/*
		 * calculate the end of data or end of block, whichever
		 * comes first
		 */
		send = pbuf->start + PIO_BLOCK_SIZE;
		xend = min(send, dend);

		/* shift up to SOP=1 space */
		dest += SOP_DISTANCE;
		xend += SOP_DISTANCE;

		/* write 8-byte chunk data */
		while (dest < xend) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		/* shift down to SOP=0 space */
		dest -= SOP_DISTANCE;
	}
	/*
	 * At this point dest could be (either, both, or neither):
	 * - at dend
	 * - at the wrap
	 */

	/*
	 * If the wrap comes before or matches the data end,
	 * copy until the wrap, then wrap.
	 *
	 * If dest is at the wrap, we will fall into the if below,
	 * the loop will not execute, and we will wrap.
	 *
	 * If the data ends at the end of the SOP above and
	 * the buffer wraps, then pbuf->end == dend == dest
	 * and nothing will get written.
	 */
	if (pbuf->end <= dend) {
		while (dest < pbuf->end) {
			writeq(*(u64 *)from, dest);
			from += sizeof(u64);
			dest += sizeof(u64);
		}

		dest -= pbuf->sc->size;
		dend -= pbuf->sc->size;
	}

	/* write 8-byte non-SOP, non-wrap chunk data */
	while (dest < dend) {
		writeq(*(u64 *)from, dest);
		from += sizeof(u64);
		dest += sizeof(u64);
	}

	/* we know carry_bytes was zero on entry to this routine */
	read_low_bytes(pbuf, from, nbytes & 0x7);

	pbuf->qw_written += nbytes >> 3;
}

/*
 * Segmented PIO Copy - middle
 *
 * Must handle a source of any alignment and any byte count, with or
 * without carry bytes left over from a previous segment.
 *
 * @pbuf: a number of blocks allocated within a PIO send context
 * @from: data source
 * @nbytes: number of bytes to copy
 */
void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
{
	unsigned long from_align = (unsigned long)from & 0x7;

	if (pbuf->carry_bytes + nbytes < 8) {
		/* not enough bytes to fill a QW */
		read_extra_bytes(pbuf, from, nbytes);
		return;
	}

	if (from_align) {
		/* misaligned source pointer - align it */
		unsigned long to_align;

		/* bytes to read to align "from" */
		to_align = 8 - from_align;

		/*
		 * In the advance-to-alignment logic below, we do not need
		 * to check if we are using more than nbytes. This is because
		 * if we are here, we already know that carry+nbytes will
		 * fill at least one QW.
		 */
		if (pbuf->carry_bytes + to_align < 8) {
			/* not enough align bytes to fill a QW */
			read_extra_bytes(pbuf, from, to_align);
			from += to_align;
			nbytes -= to_align;
		} else {
			/* bytes to fill carry */
			unsigned long to_fill = 8 - pbuf->carry_bytes;
			/* bytes left over to be read */
			unsigned long extra = to_align - to_fill;
			void __iomem *dest;

			/* fill carry... */
			read_extra_bytes(pbuf, from, to_fill);
			from += to_fill;
			nbytes -= to_fill;
			/* may not be enough valid bytes left to align */
			if (extra > nbytes)
				extra = nbytes;

			/* ...now write carry */
			dest = pbuf->start + (pbuf->qw_written * sizeof(u64));

			/*
			 * The two checks immediately below cannot both be
			 * true, hence the else. If we have wrapped, we
			 * cannot still be within the first block.
			 * Conversely, if we are still in the first block, we
			 * cannot have wrapped. We do the wrap check first
			 * as that is more likely.
			 */
			/* adjust if we've wrapped */
			if (dest >= pbuf->end)
				dest -= pbuf->sc->size;
			/* jump to SOP range if within the first block */
			else if (pbuf->qw_written < PIO_BLOCK_QWS)
				dest += SOP_DISTANCE;

			carry8_write8(pbuf->carry, dest);
			pbuf->qw_written++;

			/* read any extra bytes to do final alignment */
			/* this will overwrite anything in pbuf->carry */
			read_low_bytes(pbuf, from, extra);
			from += extra;
			nbytes -= extra;
			/*
			 * If no bytes are left, return early - we are done.
			 * NOTE: This short-circuit is *required* because
			 * "extra" may have been reduced in size and "from"
			 * is not aligned, as required when leaving this
			 * if block.
			 */
			if (nbytes == 0)
				return;
		}

		/* at this point, from is QW aligned */
	}

	if (pbuf->carry_bytes)
		mid_copy_mix(pbuf, from, nbytes);
	else
		mid_copy_straight(pbuf, from, nbytes);
}
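
/*
 * Alignment example for the routine above (illustrative): with
 * carry_bytes == 6 and a source 3 bytes past a qword boundary,
 * to_align == 5 and 6 + 5 >= 8, so the else branch fills the carry with
 * to_fill == 2 bytes, writes it out, and reads extra == 3 more bytes into
 * the emptied carry, leaving "from" qword aligned for the mid_copy_*
 * helpers.
 */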

/*
 * Segmented PIO Copy - end
 *
 * Write any remainder (in pbuf->carry) and finish writing the whole block.
 *
 * @pbuf: a number of blocks allocated within a PIO send context
 */
void seg_pio_copy_end(struct pio_buf *pbuf)
{
	void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));

	/*
	 * The two checks immediately below cannot both be true, hence the
	 * else. If we have wrapped, we cannot still be within the first
	 * block. Conversely, if we are still in the first block, we
	 * cannot have wrapped. We do the wrap check first as that is
	 * more likely.
	 */
	/* adjust if we have wrapped */
	if (dest >= pbuf->end)
		dest -= pbuf->sc->size;
	/* jump to the SOP range if within the first block */
	else if (pbuf->qw_written < PIO_BLOCK_QWS)
		dest += SOP_DISTANCE;

	/* write final bytes, if any */
	if (carry_write8(pbuf, dest)) {
		dest += sizeof(u64);
		/*
		 * NOTE: We do not need to recalculate whether dest needs
		 * SOP_DISTANCE or not.
		 *
		 * If we are in the first block and the dangle write
		 * keeps us in the same block, dest will need
		 * to retain SOP_DISTANCE in the loop below.
		 *
		 * If we are in the first block and the dangle write pushes
		 * us to the next block, then loop below will not run
		 * and dest is not used. Hence we do not need to update
		 * it.
		 *
		 * If we are past the first block, then SOP_DISTANCE
		 * was never added, so there is nothing to do.
		 */
	}

	/* fill in rest of block */
	while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
		writeq(0, dest);
		dest += sizeof(u64);
	}

	/* finished with this buffer */
	this_cpu_dec(*pbuf->sc->buffers_allocated);
	preempt_enable();
}