/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
#include "scif_map.h"
/*
 * struct scif_dma_comp_cb - SCIF DMA completion callback
 *
 * @dma_completion_func: DMA completion callback
 * @cb_cookie: DMA completion callback cookie
 * @temp_buf: Temporary buffer
 * @temp_buf_to_free: Temporary buffer to be freed
 * @is_cache: Is a kmem_cache allocated buffer
 * @dst_offset: Destination registration offset
 * @dst_window: Destination registration window
 * @len: Length of the temp buffer
 * @temp_phys: DMA address of the temp buffer
 * @sdev: The SCIF device
 * @header_padding: padding for cache line alignment
 */
struct scif_dma_comp_cb {
	void (*dma_completion_func)(void *cookie);
	void *cb_cookie;
	u8 *temp_buf;
	u8 *temp_buf_to_free;
	bool is_cache;
	s64 dst_offset;
	struct scif_window *dst_window;
	size_t len;
	dma_addr_t temp_phys;
	struct scif_dev *sdev;
	int header_padding;
};
/*
 * struct scif_copy_work - Work for DMA copy
 *
 * @src_offset: Starting source offset
 * @dst_offset: Starting destination offset
 * @src_window: Starting src registered window
 * @dst_window: Starting dst registered window
 * @loopback: true if this is a loopback DMA transfer
 * @len: Length of the transfer
 * @comp_cb: DMA copy completion callback
 * @remote_dev: The remote SCIF peer device
 * @fence_type: polling or interrupt based
 * @ordered: is this a tail byte ordered DMA transfer
 */
struct scif_copy_work {
	s64 src_offset;
	s64 dst_offset;
	struct scif_window *src_window;
	struct scif_window *dst_window;
	int loopback;
	size_t len;
	struct scif_dma_comp_cb *comp_cb;
	struct scif_dev *remote_dev;
	int fence_type;
	bool ordered;
};
/**
 * scif_reserve_dma_chan:
 * @ep: Endpoint Descriptor.
 *
 * This routine reserves a DMA channel for a particular
 * endpoint. All DMA transfers for an endpoint are always
 * programmed on the same DMA channel.
 */
int scif_reserve_dma_chan(struct scif_endpt *ep)
{
	int err = 0;
	struct scif_dev *scifdev;
	struct scif_hw_dev *sdev;
	struct dma_chan *chan;

	/* Loopback DMAs are not supported on the management node */
	if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
		return 0;
	if (scif_info.nodeid)
		scifdev = &scif_dev[0];
	else
		scifdev = ep->remote_dev;
	sdev = scifdev->sdev;
	if (!sdev->num_dma_ch)
		return -ENODEV;
	chan = sdev->dma_ch[scifdev->dma_ch_idx];
	scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
	mutex_lock(&ep->rma_info.rma_lock);
	ep->rma_info.dma_chan = chan;
	mutex_unlock(&ep->rma_info.rma_lock);
	return err;
}
#ifdef CONFIG_MMU_NOTIFIER
/**
 * scif_rma_destroy_tcw:
 *
 * This routine destroys temporary cached windows
 */
static
void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
			    struct scif_endpt *ep,
			    u64 start, u64 len)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	u64 start_va, end_va;
	u64 end = start + len;

	if (end <= start)
		return;

	list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
		window = list_entry(item, struct scif_window, list);
		ep = (struct scif_endpt *)window->ep;
		if (!len)
			break;
		start_va = window->va_for_temp;
		end_va = start_va + (window->nr_pages << PAGE_SHIFT);
		if (start < start_va && end <= start_va)
			break;
		if (start >= end_va)
			continue;
		__scif_rma_destroy_tcw_helper(window);
	}
}
static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
{
	struct scif_endpt *ep = mmn->ep;

	spin_lock(&ep->rma_info.tc_lock);
	__scif_rma_destroy_tcw(mmn, ep, start, len);
	spin_unlock(&ep->rma_info.tc_lock);
}
static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
	struct list_head *item, *tmp;
	struct scif_mmu_notif *mmn;

	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
	}
}
static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
	struct list_head *item, *tmp;
	struct scif_mmu_notif *mmn;

	spin_lock(&ep->rma_info.tc_lock);
	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		__scif_rma_destroy_tcw(mmn, ep, 0, ULONG_MAX);
	}
	spin_unlock(&ep->rma_info.tc_lock);
}
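
/*
 * Admission policy for the temporary cached window (tcw) cache: a single
 * transfer larger than scif_info.rma_tc_limit pages is never cached, and
 * once the endpoint's cumulative cached page count would cross that
 * limit, the existing cached windows are torn down before new ones are
 * inserted.
 */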
static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
	if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
		return false;
	if ((atomic_read(&ep->rma_info.tcw_total_pages)
	     + (cur_bytes >> PAGE_SHIFT)) >
	    scif_info.rma_tc_limit) {
		dev_info(scif_info.mdev.this_device,
			 "%s %d total=%d, current=%zu reached max\n",
			 __func__, __LINE__,
			 atomic_read(&ep->rma_info.tcw_total_pages),
			 (1 + (cur_bytes >> PAGE_SHIFT)));
		scif_rma_destroy_tcw_invalid();
		__scif_rma_destroy_tcw_ep(ep);
	}
	return true;
}
static void scif_mmu_notifier_release(struct mmu_notifier *mn,
				      struct mm_struct *mm)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
	schedule_work(&scif_info.misc_work);
}
static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
}
static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						     struct mm_struct *mm,
						     unsigned long start,
						     unsigned long end)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, start, end - start);
}
static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end)
{
	/*
	 * Nothing to do here, everything needed was done in
	 * invalidate_range_start.
	 */
}
static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
	.release = scif_mmu_notifier_release,
	.clear_flush_young = NULL,
	.invalidate_page = scif_mmu_notifier_invalidate_page,
	.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
	.invalidate_range_end = scif_mmu_notifier_invalidate_range_end
};
static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
{
	struct scif_endpt_rma_info *rma = &ep->rma_info;
	struct scif_mmu_notif *mmn = NULL;
	struct list_head *item, *tmp;

	mutex_lock(&ep->rma_info.mmn_lock);
	list_for_each_safe(item, tmp, &rma->mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
		list_del(item);
		kfree(mmn);
	}
	mutex_unlock(&ep->rma_info.mmn_lock);
}
static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
				   struct mm_struct *mm, struct scif_endpt *ep)
{
	mmn->ep = ep;
	mmn->mm = mm;
	mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
	INIT_LIST_HEAD(&mmn->list);
	INIT_LIST_HEAD(&mmn->tc_reg_list);
}
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
{
	struct scif_mmu_notif *mmn;
	struct list_head *item;

	list_for_each(item, &rma->mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		if (mmn->mm == mm)
			return mmn;
	}
	return NULL;
}
static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
	struct scif_mmu_notif *mmn
		= kzalloc(sizeof(*mmn), GFP_KERNEL);

	if (!mmn)
		return ERR_PTR(-ENOMEM);

	scif_init_mmu_notifier(mmn, current->mm, ep);
	if (mmu_notifier_register(&mmn->ep_mmu_notifier,
				  current->mm)) {
		kfree(mmn);
		return ERR_PTR(-EBUSY);
	}
	list_add(&mmn->list, &ep->rma_info.mmn_list);
	return mmn;
}
/*
 * Called from the misc thread to destroy temporary cached windows and
 * unregister the MMU notifier for the SCIF endpoint.
 */
void scif_mmu_notif_handler(struct work_struct *work)
{
	struct list_head *pos, *tmpq;
	struct scif_endpt *ep;

restart:
	scif_rma_destroy_tcw_invalid();
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
		ep = list_entry(pos, struct scif_endpt, mmu_list);
		list_del(&ep->mmu_list);
		spin_unlock(&scif_info.rmalock);
		scif_rma_destroy_tcw_ep(ep);
		scif_ep_unregister_mmu_notifier(ep);
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}

static bool scif_is_set_reg_cache(int flags)
{
	return !!(flags & SCIF_RMA_USECACHE);
}
#else
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm,
		       struct scif_endpt_rma_info *rma)
{
	return NULL;
}

static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
	return NULL;
}

void scif_mmu_notif_handler(struct work_struct *work)
{
}

static bool scif_is_set_reg_cache(int flags)
{
	return false;
}

static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
	return false;
}
#endif
/**
 * scif_register_temp:
 * @epd: End Point Descriptor.
 * @addr: virtual address to/from which to copy
 * @len: length of range to copy
 * @out_offset: computed offset returned by reference.
 * @out_window: allocated registered window returned by reference.
 *
 * Create a temporary registered window. The peer will not know about this
 * window. This API is used for scif_vreadfrom()/scif_vwriteto() API's.
 */
static int
scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
		   off_t *out_offset, struct scif_window **out_window)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err;
	scif_pinned_pages_t pinned_pages;
	size_t aligned_len;

	aligned_len = ALIGN(len, PAGE_SIZE);

	err = __scif_pin_pages((void *)(addr & PAGE_MASK),
			       aligned_len, &prot, 0, &pinned_pages);
	if (err)
		return err;

	pinned_pages->prot = prot;

	/* Compute the offset for this registration */
	err = scif_get_window_offset(ep, 0, 0,
				     aligned_len >> PAGE_SHIFT,
				     (s64 *)out_offset);
	if (err)
		goto error_unpin;

	/* Allocate and prepare self registration window */
	*out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
					 *out_offset, true);
	if (!*out_window) {
		scif_free_window_offset(ep, NULL, *out_offset);
		err = -ENOMEM;
		goto error_unpin;
	}

	(*out_window)->pinned_pages = pinned_pages;
	(*out_window)->nr_pages = pinned_pages->nr_pages;
	(*out_window)->prot = pinned_pages->prot;

	(*out_window)->va_for_temp = addr & PAGE_MASK;
	err = scif_map_window(ep->remote_dev, *out_window);
	if (err) {
		/* Something went wrong! Rollback */
		scif_destroy_window(ep, *out_window);
		*out_window = NULL;
	} else {
		*out_offset |= (addr - (*out_window)->va_for_temp);
	}
	return err;
error_unpin:
	if (err)
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
	scif_unpin_pages(pinned_pages);
	return err;
}
#define SCIF_DMA_TO (3 * HZ)

/*
 * scif_sync_dma - Program a DMA without an interrupt descriptor
 *
 * @sdev - The SCIF device instance used for DMA registration.
 * @chan - DMA channel to be used.
 * @sync_wait: Wait for DMA to complete?
 *
 * Return 0 on success and -errno on error.
 */
static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
			 bool sync_wait)
{
	int err = 0;
	struct dma_async_tx_descriptor *tx = NULL;
	enum dma_ctrl_flags flags = DMA_PREP_FENCE;
	dma_cookie_t cookie;
	struct dma_device *ddev;

	if (!chan) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	ddev = chan->device;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	if (!sync_wait) {
		dma_async_issue_pending(chan);
	} else {
		if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
			err = 0;
		} else {
			err = -EIO;
			dev_err(&sdev->dev, "%s %d err %d\n",
				__func__, __LINE__, err);
		}
	}
release:
	return err;
}

static void scif_dma_callback(void *arg)
{
	struct completion *done = (struct completion *)arg;

	complete(done);
}

#define SCIF_DMA_SYNC_WAIT true
#define SCIF_DMA_POLL BIT(0)
#define SCIF_DMA_INTR BIT(1)
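
/*
 * Fence types used when draining a channel: SCIF_DMA_POLL waits by
 * polling the DMA engine (scif_drain_dma_poll()) while SCIF_DMA_INTR
 * blocks on an interrupt completion (scif_drain_dma_intr()).
 */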
/*
 * scif_async_dma - Program a DMA with an interrupt descriptor
 *
 * @sdev - The SCIF device instance used for DMA registration.
 * @chan - DMA channel to be used.
 * Return 0 on success and -errno on error.
 */
static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	int err = 0;
	struct dma_device *ddev;
	struct dma_async_tx_descriptor *tx = NULL;
	enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	DECLARE_COMPLETION_ONSTACK(done_wait);
	dma_cookie_t cookie;
	enum dma_status status;

	if (!chan) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	ddev = chan->device;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	reinit_completion(&done_wait);
	tx->callback = scif_dma_callback;
	tx->callback_param = &done_wait;
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	dma_async_issue_pending(chan);

	err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
	if (!err) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	err = 0;
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status != DMA_COMPLETE) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
release:
	return err;
}
/*
 * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
 * DMA channel via polling.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	if (!chan)
		return -EINVAL;
	return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
}

/*
 * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
 * DMA channel via interrupt based blocking wait.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	if (!chan)
		return -EINVAL;
	return scif_async_dma(sdev, chan);
}
/**
 * scif_rma_destroy_windows:
 *
 * This routine destroys all windows queued for cleanup
 */
void scif_rma_destroy_windows(void)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep;
	struct dma_chan *chan;

	might_sleep();
restart:
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(item, tmp, &scif_info.rma) {
		window = list_entry(item, struct scif_window,
				    list);
		ep = (struct scif_endpt *)window->ep;
		chan = ep->rma_info.dma_chan;

		list_del_init(&window->list);
		spin_unlock(&scif_info.rmalock);
		if (!chan || !scifdev_alive(ep) ||
		    !scif_drain_dma_intr(ep->remote_dev->sdev,
					 ep->rma_info.dma_chan))
			/* Remove window from global list */
			window->unreg_state = OP_COMPLETED;
		else
			dev_warn(&ep->remote_dev->sdev->dev,
				 "DMA engine hung?\n");
		if (window->unreg_state == OP_COMPLETED) {
			if (window->type == SCIF_WINDOW_SELF)
				scif_destroy_window(ep, window);
			else
				scif_destroy_remote_window(window);
			atomic_dec(&ep->rma_info.tw_refcount);
		}
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}
/**
 * scif_rma_destroy_tcw:
 *
 * This routine destroys temporary cached registered windows
 * which have been queued for cleanup.
 */
void scif_rma_destroy_tcw_invalid(void)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep;
	struct dma_chan *chan;

	might_sleep();
restart:
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(item, tmp, &scif_info.rma_tc) {
		window = list_entry(item, struct scif_window, list);
		ep = (struct scif_endpt *)window->ep;
		chan = ep->rma_info.dma_chan;
		list_del_init(&window->list);
		spin_unlock(&scif_info.rmalock);
		mutex_lock(&ep->rma_info.rma_lock);
		if (!chan || !scifdev_alive(ep) ||
		    !scif_drain_dma_intr(ep->remote_dev->sdev,
					 ep->rma_info.dma_chan)) {
			atomic_sub(window->nr_pages,
				   &ep->rma_info.tcw_total_pages);
			scif_destroy_window(ep, window);
			atomic_dec(&ep->rma_info.tcw_refcount);
		} else {
			dev_warn(&ep->remote_dev->sdev->dev,
				 "DMA engine hung?\n");
		}
		mutex_unlock(&ep->rma_info.rma_lock);
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}
static inline
void *_get_local_va(off_t off, struct scif_window *window, size_t len)
{
	int page_nr = (off - window->offset) >> PAGE_SHIFT;
	off_t page_off = off & ~PAGE_MASK;
	void *va = NULL;

	if (window->type == SCIF_WINDOW_SELF) {
		struct page **pages = window->pinned_pages->pages;

		va = page_address(pages[page_nr]) + page_off;
	}
	return va;
}
static inline
void *ioremap_remote(off_t off, struct scif_window *window,
		     size_t len, struct scif_dev *dev,
		     struct scif_window_iter *iter)
{
	dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);

	/*
	 * If the DMA address is not card relative then we need the DMA
	 * addresses to be an offset into the bar. The aperture base was already
	 * added so subtract it here since scif_ioremap is going to add it again
	 */
	if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
	    dev->sdev->aper && !dev->sdev->card_rel_da)
		phys = phys - dev->sdev->aper->pa;
	return scif_ioremap(phys, len, dev);
}

static inline void
iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
{
	scif_iounmap(virt, size, work->remote_dev);
}
/*
 * Takes care of ordering issue caused by
 * 1. Hardware: Only in the case of cpu copy from mgmt node to card
 * because of WC memory.
 * 2. Software: If memcpy reorders copy instructions for optimization.
 * This could happen at both mgmt node and card.
 */
static inline void
scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
{
	if (!count)
		return;

	memcpy_toio((void __iomem __force *)dst, src, --count);
	/* Order the last byte with the previous stores */
	wmb();
	*(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
					   size_t count, bool ordered)
{
	if (ordered)
		scif_ordered_memcpy_toio(dst, src, count);
	else
		memcpy_toio((void __iomem __force *)dst, src, count);
}

static inline
void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
{
	if (!count)
		return;

	memcpy_fromio(dst, (void __iomem __force *)src, --count);
	/* Order the last byte with the previous loads */
	rmb();
	*(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
					     size_t count, bool ordered)
{
	if (ordered)
		scif_ordered_memcpy_fromio(dst, src, count);
	else
		memcpy_fromio(dst, (void __iomem __force *)src, count);
}
#define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)

/*
 * scif_off_to_dma_addr:
 * Obtain the dma_addr given the window and the offset.
 * @window: Registered window.
 * @off: Window offset.
 * @nr_bytes: Return the number of contiguous bytes till next DMA addr index.
 * @index: Return the index of the dma_addr array found.
 * @start_off: start offset of index of the dma addr array found.
 * The nr_bytes provides the callee an estimate of the maximum possible
 * DMA xfer possible while the index/start_off provide faster lookups
 * for the next iteration.
 */
dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
				size_t *nr_bytes, struct scif_window_iter *iter)
{
	int i, page_nr;
	s64 start, end;
	off_t page_off;

	if (window->nr_pages == window->nr_contig_chunks) {
		page_nr = (off - window->offset) >> PAGE_SHIFT;
		page_off = off & ~PAGE_MASK;

		if (nr_bytes)
			*nr_bytes = PAGE_SIZE - page_off;
		return window->dma_addr[page_nr] | page_off;
	}
	if (iter) {
		i = iter->index;
		start = iter->offset;
	} else {
		i = 0;
		start = window->offset;
	}
	for (; i < window->nr_contig_chunks; i++) {
		end = start + (window->num_pages[i] << PAGE_SHIFT);
		if (off >= start && off < end) {
			if (iter) {
				iter->index = i;
				iter->offset = start;
			}
			if (nr_bytes)
				*nr_bytes = end - off;
			return (window->dma_addr[i] + (off - start));
		}
		start += (window->num_pages[i] << PAGE_SHIFT);
	}
	dev_err(scif_info.mdev.this_device,
		"%s %d BUG. Addr not found? window %p off 0x%llx\n",
		__func__, __LINE__, window, off);
	return SCIF_RMA_ERROR_CODE;
}
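
/*
 * Worked example with illustrative values: for a window registered at
 * offset 0x1000 whose first contiguous chunk spans four pages, an off of
 * 0x1800 resolves to window->dma_addr[0] + 0x800 and *nr_bytes is set to
 * the bytes remaining in that chunk, letting the caller program a single
 * DMA descriptor for the whole contiguous run.
 */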
/*
 * Copy between rma window and temporary buffer
 */
static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
				    u8 *temp, size_t rem_len, bool to_temp)
{
	void *window_virt;
	size_t loop_len;
	int offset_in_page;
	s64 end_offset;

	offset_in_page = offset & ~PAGE_MASK;
	loop_len = PAGE_SIZE - offset_in_page;

	if (rem_len < loop_len)
		loop_len = rem_len;

	window_virt = _get_local_va(offset, window, loop_len);
	if (!window_virt)
		return;
	if (to_temp)
		memcpy(temp, window_virt, loop_len);
	else
		memcpy(window_virt, temp, loop_len);

	offset += loop_len;
	temp += loop_len;
	rem_len -= loop_len;

	end_offset = window->offset +
		(window->nr_pages << PAGE_SHIFT);
	while (rem_len) {
		if (offset == end_offset) {
			window = list_next_entry(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		loop_len = min(PAGE_SIZE, rem_len);
		window_virt = _get_local_va(offset, window, loop_len);
		if (!window_virt)
			return;
		if (to_temp)
			memcpy(temp, window_virt, loop_len);
		else
			memcpy(window_virt, temp, loop_len);
		offset += loop_len;
		temp += loop_len;
		rem_len -= loop_len;
	}
}
/**
 * scif_rma_completion_cb:
 * @data: RMA cookie
 *
 * RMA interrupt completion callback.
 */
static void scif_rma_completion_cb(void *data)
{
	struct scif_dma_comp_cb *comp_cb = data;

	/* Free DMA Completion CB. */
	if (comp_cb->dst_window)
		scif_rma_local_cpu_copy(comp_cb->dst_offset,
					comp_cb->dst_window,
					comp_cb->temp_buf +
					comp_cb->header_padding,
					comp_cb->len, false);
	scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
			  SCIF_KMEM_UNALIGNED_BUF_SIZE);
	if (comp_cb->is_cache)
		kmem_cache_free(unaligned_cache,
				comp_cb->temp_buf_to_free);
	else
		kfree(comp_cb->temp_buf_to_free);
}
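
/*
 * Unaligned transfers bounce through a cache-line aligned temporary
 * buffer: the CPU copies the misaligned head and tail bytes while the
 * DMA engine moves the aligned body, and scif_rma_completion_cb() runs
 * off the final interrupt descriptor to copy data back to the
 * destination window (for reads) and free the buffer.
 */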
/* Copies between temporary buffer and offsets provided in work */
static int
scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
				 u8 *temp, struct dma_chan *chan,
				 bool src_local)
{
	struct scif_dma_comp_cb *comp_cb = work->comp_cb;
	dma_addr_t window_dma_addr, temp_dma_addr;
	dma_addr_t temp_phys = comp_cb->temp_phys;
	size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
	int offset_in_ca, ret = 0;
	s64 end_offset, offset;
	struct scif_window *window;
	void *window_virt_addr;
	size_t tail_len;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	if (src_local) {
		offset = work->dst_offset;
		window = work->dst_window;
	} else {
		offset = work->src_offset;
		window = work->src_window;
	}

	offset_in_ca = offset & (L1_CACHE_BYTES - 1);
	if (offset_in_ca) {
		loop_len = L1_CACHE_BYTES - offset_in_ca;
		loop_len = min(loop_len, remaining_len);
		window_virt_addr = ioremap_remote(offset, window,
						  loop_len,
						  work->remote_dev,
						  NULL);
		if (!window_virt_addr)
			return -ENOMEM;
		if (src_local)
			scif_unaligned_cpy_toio(window_virt_addr, temp,
						loop_len,
						work->ordered &&
						!(remaining_len - loop_len));
		else
			scif_unaligned_cpy_fromio(temp, window_virt_addr,
						  loop_len, work->ordered &&
						  !(remaining_len - loop_len));
		iounmap_remote(window_virt_addr, loop_len, work);

		offset += loop_len;
		temp += loop_len;
		temp_phys += loop_len;
		remaining_len -= loop_len;
	}

	offset_in_ca = offset & ~PAGE_MASK;
	end_offset = window->offset +
		(window->nr_pages << PAGE_SHIFT);

	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
	remaining_len -= tail_len;
	while (remaining_len) {
		if (offset == end_offset) {
			window = list_next_entry(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		if (scif_is_mgmt_node())
			temp_dma_addr = temp_phys;
		else
			/* Fix if we ever enable IOMMU on the card */
			temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
		window_dma_addr = scif_off_to_dma_addr(window, offset,
						       &nr_contig_bytes,
						       NULL);
		loop_len = min(nr_contig_bytes, remaining_len);
		if (src_local) {
			if (work->ordered && !tail_len &&
			    !(remaining_len - loop_len) &&
			    loop_len != L1_CACHE_BYTES) {
				/*
				 * Break up the last chunk of the transfer into
				 * two steps if there is no tail, to guarantee
				 * DMA ordering. SCIF_DMA_POLLING inserts
				 * a status update descriptor in step 1 which
				 * acts as a double sided synchronization fence
				 * for the DMA engine to ensure that the last
				 * cache line in step 2 is updated last.
				 */
				/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len -
							    L1_CACHE_BYTES,
							    DMA_PREP_FENCE);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
				offset += (loop_len - L1_CACHE_BYTES);
				temp_dma_addr += (loop_len - L1_CACHE_BYTES);
				window_dma_addr += (loop_len - L1_CACHE_BYTES);
				remaining_len -= (loop_len - L1_CACHE_BYTES);
				loop_len = remaining_len;

				/* Step 2) DMA: L1_CACHE_BYTES */
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len, 0);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
			} else {
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len, 0);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
			}
		} else {
			tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
							 window_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		}
		offset += loop_len;
		temp += loop_len;
		temp_phys += loop_len;
		remaining_len -= loop_len;
	}
	if (tail_len) {
		if (offset == end_offset) {
			window = list_next_entry(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		window_virt_addr = ioremap_remote(offset, window, tail_len,
						  work->remote_dev,
						  NULL);
		if (!window_virt_addr)
			return -ENOMEM;
		/*
		 * The CPU copy for the tail bytes must be initiated only once
		 * previous DMA transfers for this endpoint have completed
		 * to guarantee ordering.
		 */
		if (work->ordered) {
			struct scif_dev *rdev = work->remote_dev;

			ret = scif_drain_dma_intr(rdev->sdev, chan);
			if (ret)
				return ret;
		}
		if (src_local)
			scif_unaligned_cpy_toio(window_virt_addr, temp,
						tail_len, work->ordered);
		else
			scif_unaligned_cpy_fromio(temp, window_virt_addr,
						  tail_len, work->ordered);
		iounmap_remote(window_virt_addr, tail_len, work);
	}
	tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		return ret;
	}
	tx->callback = &scif_rma_completion_cb;
	tx->callback_param = comp_cb;
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		ret = -ENOMEM;
		return ret;
	}
	dma_async_issue_pending(chan);
	return 0;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}
/*
 * _scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy.
 */
static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					   struct dma_chan *chan)
{
	dma_addr_t src_dma_addr, dst_dma_addr;
	size_t loop_len, remaining_len, src_contig_bytes = 0;
	size_t dst_contig_bytes = 0;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;
	s64 end_src_offset, end_dst_offset;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	remaining_len = work->len;

	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);
	end_src_offset = src_window->offset +
		(src_window->nr_pages << PAGE_SHIFT);
	end_dst_offset = dst_window->offset +
		(dst_window->nr_pages << PAGE_SHIFT);
	while (remaining_len) {
		if (src_offset == end_src_offset) {
			src_window = list_next_entry(src_window, list);
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(src_window, &src_win_iter);
		}
		if (dst_offset == end_dst_offset) {
			dst_window = list_next_entry(dst_window, list);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(dst_window, &dst_win_iter);
		}

		/* compute dma addresses for transfer */
		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
						    &src_contig_bytes,
						    &src_win_iter);
		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
						    &dst_contig_bytes,
						    &dst_win_iter);
		loop_len = min(src_contig_bytes, dst_contig_bytes);
		loop_len = min(loop_len, remaining_len);
		if (work->ordered && !(remaining_len - loop_len)) {
			/*
			 * Break up the last chunk of the transfer into two
			 * steps to ensure that the last byte in step 2 is
			 * updated last.
			 */
			/* Step 1) DMA: Body Length - 1 */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len - 1,
							 DMA_PREP_FENCE);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			src_offset += (loop_len - 1);
			dst_offset += (loop_len - 1);
			src_dma_addr += (loop_len - 1);
			dst_dma_addr += (loop_len - 1);
			remaining_len -= (loop_len - 1);
			loop_len = remaining_len;

			/* Step 2) DMA: 1 BYTES */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		} else {
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		}
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}
	return ret;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}
/*
 * scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy.
 */
static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					  struct dma_chan *chan)
{
	dma_addr_t src_dma_addr, dst_dma_addr;
	size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
	size_t dst_contig_bytes = 0;
	int src_cache_off;
	s64 end_src_offset, end_dst_offset;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;
	void *src_virt, *dst_virt;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	remaining_len = work->len;
	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);

	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
	if (src_cache_off != 0) {
		/* Head */
		loop_len = L1_CACHE_BYTES - src_cache_off;
		loop_len = min(loop_len, remaining_len);
		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!src_virt)
			return -ENOMEM;
		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!dst_virt) {
			if (src_window->type != SCIF_WINDOW_SELF)
				iounmap_remote(src_virt, loop_len, work);
			return -ENOMEM;
		}
		if (src_window->type == SCIF_WINDOW_SELF)
			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
						remaining_len == loop_len ?
						work->ordered : false);
		else
			scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
						  remaining_len == loop_len ?
						  work->ordered : false);
		if (src_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(src_virt, loop_len, work);
		if (dst_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(dst_virt, loop_len, work);
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}

	end_src_offset = src_window->offset +
		(src_window->nr_pages << PAGE_SHIFT);
	end_dst_offset = dst_window->offset +
		(dst_window->nr_pages << PAGE_SHIFT);
	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
	remaining_len -= tail_len;
	while (remaining_len) {
		if (src_offset == end_src_offset) {
			src_window = list_next_entry(src_window, list);
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(src_window, &src_win_iter);
		}
		if (dst_offset == end_dst_offset) {
			dst_window = list_next_entry(dst_window, list);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(dst_window, &dst_win_iter);
		}

		/* compute dma addresses for transfer */
		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
						    &src_contig_bytes,
						    &src_win_iter);
		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
						    &dst_contig_bytes,
						    &dst_win_iter);
		loop_len = min(src_contig_bytes, dst_contig_bytes);
		loop_len = min(loop_len, remaining_len);
		if (work->ordered && !tail_len &&
		    !(remaining_len - loop_len)) {
			/*
			 * Break up the last chunk of the transfer into two
			 * steps if there is no tail, to guarantee DMA
			 * ordering. Passing SCIF_DMA_POLLING inserts a status
			 * update descriptor in step 1 which acts as a double
			 * sided synchronization fence for the DMA engine to
			 * ensure that the last cache line in step 2 is
			 * updated last.
			 */
			/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len -
							 L1_CACHE_BYTES,
							 DMA_PREP_FENCE);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
			src_offset += (loop_len - L1_CACHE_BYTES);
			dst_offset += (loop_len - L1_CACHE_BYTES);
			src_dma_addr += (loop_len - L1_CACHE_BYTES);
			dst_dma_addr += (loop_len - L1_CACHE_BYTES);
			remaining_len -= (loop_len - L1_CACHE_BYTES);
			loop_len = remaining_len;

			/* Step 2) DMA: L1_CACHE_BYTES */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		} else {
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		}
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}
	remaining_len = tail_len;
	if (remaining_len) {
		loop_len = remaining_len;
		if (src_offset == end_src_offset)
			src_window = list_next_entry(src_window, list);
		if (dst_offset == end_dst_offset)
			dst_window = list_next_entry(dst_window, list);

		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
		/*
		 * The CPU copy for the tail bytes must be initiated only once
		 * previous DMA transfers for this endpoint have completed to
		 * guarantee ordering.
		 */
		if (work->ordered) {
			struct scif_dev *rdev = work->remote_dev;

			ret = scif_drain_dma_poll(rdev->sdev, chan);
			if (ret)
				return ret;
		}
		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!src_virt)
			return -ENOMEM;

		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!dst_virt) {
			if (src_window->type != SCIF_WINDOW_SELF)
				iounmap_remote(src_virt, loop_len, work);
			return -ENOMEM;
		}

		if (src_window->type == SCIF_WINDOW_SELF)
			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
						work->ordered);
		else
			scif_unaligned_cpy_fromio(dst_virt, src_virt,
						  loop_len, work->ordered);
		if (src_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(src_virt, loop_len, work);

		if (dst_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(dst_virt, loop_len, work);
		remaining_len -= loop_len;
	}
	return ret;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}
/*
 * scif_rma_list_cpu_copy:
 *
 * Traverse all the windows and perform CPU copy.
 */
static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
{
	void *src_virt, *dst_virt;
	size_t loop_len, remaining_len;
	int src_page_off, dst_page_off;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 end_src_offset, end_dst_offset;
	int ret = 0;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;

	remaining_len = work->len;

	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);
	while (remaining_len) {
		src_page_off = src_offset & ~PAGE_MASK;
		dst_page_off = dst_offset & ~PAGE_MASK;
		loop_len = min(PAGE_SIZE -
			       max(src_page_off, dst_page_off),
			       remaining_len);

		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev,
						  &src_win_iter);
		if (!src_virt) {
			ret = -ENOMEM;
			goto error;
		}

		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev,
						  &dst_win_iter);
		if (!dst_virt) {
			if (src_window->type == SCIF_WINDOW_PEER)
				iounmap_remote(src_virt, loop_len, work);
			ret = -ENOMEM;
			goto error;
		}

		if (work->loopback) {
			memcpy(dst_virt, src_virt, loop_len);
		} else {
			if (src_window->type == SCIF_WINDOW_SELF)
				memcpy_toio((void __iomem __force *)dst_virt,
					    src_virt, loop_len);
			else
				memcpy_fromio(dst_virt,
					      (void __iomem __force *)src_virt,
					      loop_len);
		}
		if (src_window->type == SCIF_WINDOW_PEER)
			iounmap_remote(src_virt, loop_len, work);

		if (dst_window->type == SCIF_WINDOW_PEER)
			iounmap_remote(dst_virt, loop_len, work);

		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
		if (remaining_len) {
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			if (src_offset == end_src_offset) {
				src_window = list_next_entry(src_window, list);
				scif_init_window_iter(src_window,
						      &src_win_iter);
			}
			if (dst_offset == end_dst_offset) {
				dst_window = list_next_entry(dst_window, list);
				scif_init_window_iter(dst_window,
						      &dst_win_iter);
			}
		}
	}
error:
	return ret;
}
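
/*
 * Strategy selection for a list copy: engines that can DMA with byte
 * alignment take the fast aligned path; identical cache-line
 * misalignment on both sides still permits the aligned list copy;
 * loopback falls back to a CPU copy; everything else bounces through a
 * cache-line aligned temporary buffer.
 */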
static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
					  struct scif_copy_work *work,
					  struct dma_chan *chan, off_t loffset)
{
	int src_cache_off, dst_cache_off;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	u8 *temp = NULL;
	bool src_local = true, dst_local = false;
	struct scif_dma_comp_cb *comp_cb;
	dma_addr_t src_dma_addr, dst_dma_addr;
	int err;

	if (is_dma_copy_aligned(chan->device, 1, 1, 1))
		return _scif_rma_list_dma_copy_aligned(work, chan);

	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
	dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);

	if (dst_cache_off == src_cache_off)
		return scif_rma_list_dma_copy_aligned(work, chan);

	if (work->loopback)
		return scif_rma_list_cpu_copy(work);
	src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
	dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
	src_local = work->src_window->type == SCIF_WINDOW_SELF;
	dst_local = work->dst_window->type == SCIF_WINDOW_SELF;

	dst_local = dst_local;
	/* Allocate dma_completion cb */
	comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
	if (!comp_cb)
		goto error;

	work->comp_cb = comp_cb;
	comp_cb->cb_cookie = comp_cb;
	comp_cb->dma_completion_func = &scif_rma_completion_cb;

	if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
		comp_cb->is_cache = false;
		/* Allocate padding bytes to align to a cache line */
		temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
			       GFP_KERNEL);
		if (!temp)
			goto free_comp_cb;
		comp_cb->temp_buf_to_free = temp;
		/* kmalloc(..) does not guarantee cache line alignment */
		if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
			temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
	} else {
		comp_cb->is_cache = true;
		temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
		if (!temp)
			goto free_comp_cb;
		comp_cb->temp_buf_to_free = temp;
	}

	if (src_local) {
		temp += dst_cache_off;
		scif_rma_local_cpu_copy(work->src_offset, work->src_window,
					temp, work->len, true);
	} else {
		comp_cb->dst_window = work->dst_window;
		comp_cb->dst_offset = work->dst_offset;
		work->src_offset = work->src_offset - src_cache_off;
		comp_cb->len = work->len;
		work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
		comp_cb->header_padding = src_cache_off;
	}
	comp_cb->temp_buf = temp;

	err = scif_map_single(&comp_cb->temp_phys, temp,
			      work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
	if (err)
		goto free_temp_buf;
	comp_cb->sdev = work->remote_dev;
	if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
		goto free_temp_buf;
	if (!src_local)
		work->fence_type = SCIF_DMA_INTR;
	return 0;
free_temp_buf:
	if (comp_cb->is_cache)
		kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
	else
		kfree(comp_cb->temp_buf_to_free);
free_comp_cb:
	kfree(comp_cb);
error:
	return -ENOMEM;
}
/**
 * scif_rma_copy:
 * @epd: end point descriptor.
 * @loffset: offset in local registered address space to/from which to copy
 * @addr: user virtual address to/from which to copy
 * @len: length of range to copy
 * @roffset: offset in remote registered address space to/from which to copy
 * @flags: flags
 * @dir: LOCAL->REMOTE or vice versa.
 * @last_chunk: true if this is the last chunk of a larger transfer
 *
 * Validate parameters, check if src/dst registered ranges requested for copy
 * are valid and initiate either CPU or DMA copy.
 */
static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
			 size_t len, off_t roffset, int flags,
			 enum scif_rma_dir dir, bool last_chunk)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_rma_req remote_req;
	struct scif_rma_req req;
	struct scif_window *local_window = NULL;
	struct scif_window *remote_window = NULL;
	struct scif_copy_work copy_work;
	bool loopback;
	int err = 0;
	struct dma_chan *chan;
	struct scif_mmu_notif *mmn = NULL;
	bool cache = false;
	struct device *spdev;

	err = scif_verify_epd(ep);
	if (err)
		return err;

	if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE |
				SCIF_RMA_SYNC | SCIF_RMA_ORDERED)))
		return -EINVAL;

	loopback = scifdev_self(ep->remote_dev) ? true : false;
	copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ?
				SCIF_DMA_POLL : 0;
	copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);

	/* Use CPU for Mgmt node <-> Mgmt node copies */
	if (loopback && scif_is_mgmt_node()) {
		flags |= SCIF_RMA_USECPU;
		copy_work.fence_type = 0x0;
	}

	cache = scif_is_set_reg_cache(flags);

	remote_req.out_window = &remote_window;
	remote_req.offset = roffset;
	remote_req.nr_bytes = len;
	/*
	 * If transfer is from local to remote then the remote window
	 * must be writeable and vice versa.
	 */
	remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ;
	remote_req.type = SCIF_WINDOW_PARTIAL;
	remote_req.head = &ep->rma_info.remote_reg_list;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		return err;
	}

	if (addr && cache) {
		mutex_lock(&ep->rma_info.mmn_lock);
		mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
		if (!mmn)
			mmn = scif_add_mmu_notifier(current->mm, ep);
		mutex_unlock(&ep->rma_info.mmn_lock);
		if (IS_ERR(mmn)) {
			scif_put_peer_dev(spdev);
			return PTR_ERR(mmn);
		}
		cache = cache && !scif_rma_tc_can_cache(ep, len);
	}
	mutex_lock(&ep->rma_info.rma_lock);
	if (addr) {
		req.out_window = &local_window;
		req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
				     PAGE_SIZE);
		req.va_for_temp = addr & PAGE_MASK;
		req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
			    VM_READ : VM_WRITE | VM_READ);
		/* Does a valid local window exist? */
		if (mmn) {
			spin_lock(&ep->rma_info.tc_lock);
			req.head = &mmn->tc_reg_list;
			err = scif_query_tcw(ep, &req);
			spin_unlock(&ep->rma_info.tc_lock);
		}
		if (!mmn || err) {
			err = scif_register_temp(epd, req.va_for_temp,
						 req.nr_bytes, req.prot,
						 &loffset, &local_window);
			if (err) {
				mutex_unlock(&ep->rma_info.rma_lock);
				goto error;
			}
			if (!cache)
				goto skip_cache;
			atomic_inc(&ep->rma_info.tcw_refcount);
			atomic_add_return(local_window->nr_pages,
					  &ep->rma_info.tcw_total_pages);
			if (mmn) {
				spin_lock(&ep->rma_info.tc_lock);
				scif_insert_tcw(local_window,
						&mmn->tc_reg_list);
				spin_unlock(&ep->rma_info.tc_lock);
			}
		}
skip_cache:
		loffset = local_window->offset +
				(addr - local_window->va_for_temp);
	} else {
		req.out_window = &local_window;
		req.offset = loffset;
		/*
		 * If transfer is from local to remote then the self window
		 * must be readable and vice versa.
		 */
		req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
		req.nr_bytes = len;
		req.type = SCIF_WINDOW_PARTIAL;
		req.head = &ep->rma_info.reg_list;
		/* Does a valid local window exist? */
		err = scif_query_window(&req);
		if (err) {
			mutex_unlock(&ep->rma_info.rma_lock);
			goto error;
		}
	}

	/* Does a valid remote window exist? */
	err = scif_query_window(&remote_req);
	if (err) {
		mutex_unlock(&ep->rma_info.rma_lock);
		goto error;
	}

	/*
	 * Prepare copy_work for submitting work to the DMA kernel thread
	 * or CPU copy routine.
	 */
	copy_work.len = len;
	copy_work.loopback = loopback;
	copy_work.remote_dev = ep->remote_dev;
	if (dir == SCIF_LOCAL_TO_REMOTE) {
		copy_work.src_offset = loffset;
		copy_work.src_window = local_window;
		copy_work.dst_offset = roffset;
		copy_work.dst_window = remote_window;
	} else {
		copy_work.src_offset = roffset;
		copy_work.src_window = remote_window;
		copy_work.dst_offset = loffset;
		copy_work.dst_window = local_window;
	}

	if (flags & SCIF_RMA_USECPU) {
		scif_rma_list_cpu_copy(&copy_work);
	} else {
		chan = ep->rma_info.dma_chan;
		err = scif_rma_list_dma_copy_wrapper(epd, &copy_work,
						     chan, loffset);
	}
	if (addr && !cache)
		atomic_inc(&ep->rma_info.tw_refcount);

	mutex_unlock(&ep->rma_info.rma_lock);

	if (last_chunk) {
		struct scif_dev *rdev = ep->remote_dev;

		if (copy_work.fence_type == SCIF_DMA_POLL)
			err = scif_drain_dma_poll(rdev->sdev,
						  ep->rma_info.dma_chan);
		else if (copy_work.fence_type == SCIF_DMA_INTR)
			err = scif_drain_dma_intr(rdev->sdev,
						  ep->rma_info.dma_chan);
	}

	if (addr && !cache)
		scif_queue_for_cleanup(local_window, &scif_info.rma);
	scif_put_peer_dev(spdev);
	return err;
error:
	if (err) {
		if (addr && local_window && !cache)
			scif_destroy_window(ep, local_window);
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d len 0x%lx\n",
			__func__, __LINE__, err, len);
	}
	scif_put_peer_dev(spdev);
	return err;
}
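
/*
 * The exported entry points below split unaligned transfers into
 * SCIF_MAX_UNALIGNED_BUF_SIZE chunks so that each chunk fits the bounce
 * buffer; only the final call passes last_chunk, so the SCIF_RMA_SYNC
 * and SCIF_RMA_ORDERED semantics apply once, at the end of the transfer.
 */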
int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
		  off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n",
		epd, loffset, len, roffset, flags);
	if (scif_unaligned(loffset, roffset)) {
		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, loffset, 0x0,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_REMOTE_TO_LOCAL, false);
			if (err)
				goto readfrom_err;
			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, loffset, 0x0, len,
			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
readfrom_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_readfrom);
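
/*
 * A minimal usage sketch (illustrative only, error handling elided),
 * assuming both offsets were previously registered with scif_register():
 *
 *	err = scif_readfrom(epd, loffset, len, roffset, SCIF_RMA_SYNC);
 *
 * pulls len bytes from the peer's registered address space at roffset
 * into the local registered range at loffset and returns only after the
 * DMA channel has been drained.
 */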
int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
		 off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, loffset, len, roffset, flags);
	if (scif_unaligned(loffset, roffset)) {
		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, loffset, 0x0,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_LOCAL_TO_REMOTE, false);
			if (err)
				goto writeto_err;
			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, loffset, 0x0, len,
			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
writeto_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_writeto);
int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len,
		   off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, addr, len, roffset, flags);
	if (scif_unaligned((off_t __force)addr, roffset)) {
		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
			flags &= ~SCIF_RMA_USECACHE;

		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, 0, (u64)addr,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_REMOTE_TO_LOCAL, false);
			if (err)
				goto vreadfrom_err;
			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, 0, (u64)addr, len,
			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
vreadfrom_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_vreadfrom);
int scif_vwriteto(scif_epd_t epd, void *addr, size_t len,
		  off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, addr, len, roffset, flags);
	if (scif_unaligned((off_t __force)addr, roffset)) {
		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
			flags &= ~SCIF_RMA_USECACHE;

		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, 0, (u64)addr,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_LOCAL_TO_REMOTE, false);
			if (err)
				goto vwriteto_err;
			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, 0, (u64)addr, len,
			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
vwriteto_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_vwriteto);