/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/intel-iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include "scif_main.h"
/* Used to skip ulimit checks for registrations with SCIF_MAP_KERNEL flag */
#define SCIF_MAP_ULIMIT 0x40

bool scif_ulimit_check = 1;
/*
 * Initialize RMA per EP data structures.
 */
void scif_rma_ep_init(struct scif_endpt *ep)
{
	struct scif_endpt_rma_info *rma = &ep->rma_info;

	mutex_init(&rma->rma_lock);
	init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
	spin_lock_init(&rma->tc_lock);
	mutex_init(&rma->mmn_lock);
	INIT_LIST_HEAD(&rma->reg_list);
	INIT_LIST_HEAD(&rma->remote_reg_list);
	atomic_set(&rma->tw_refcount, 0);
	atomic_set(&rma->tcw_refcount, 0);
	atomic_set(&rma->tcw_total_pages, 0);
	atomic_set(&rma->fence_refcount, 0);

	rma->async_list_del = 0;
	INIT_LIST_HEAD(&rma->mmn_list);
	INIT_LIST_HEAD(&rma->vma_list);
	init_waitqueue_head(&rma->markwq);
}
/**
 * scif_rma_ep_can_uninit:
 * @ep: end point
 *
 * Returns 1 if an endpoint can be uninitialized and 0 otherwise.
 */
int scif_rma_ep_can_uninit(struct scif_endpt *ep)
{
	int ret = 0;

	mutex_lock(&ep->rma_info.rma_lock);
	/* Destroy RMA Info only if both lists are empty */
	if (list_empty(&ep->rma_info.reg_list) &&
	    list_empty(&ep->rma_info.remote_reg_list) &&
	    list_empty(&ep->rma_info.mmn_list) &&
	    !atomic_read(&ep->rma_info.tw_refcount) &&
	    !atomic_read(&ep->rma_info.tcw_refcount) &&
	    !atomic_read(&ep->rma_info.fence_refcount))
		ret = 1;
	mutex_unlock(&ep->rma_info.rma_lock);
	return ret;
}
/**
 * scif_create_pinned_pages:
 * @nr_pages: number of pages in window
 * @prot: read/write protection
 *
 * Allocate and prepare a set of pinned pages.
 */
static struct scif_pinned_pages *
scif_create_pinned_pages(int nr_pages, int prot)
{
	struct scif_pinned_pages *pin;

	pin = scif_zalloc(sizeof(*pin));
	if (!pin)
		return NULL;

	pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages));
	if (!pin->pages)
		goto error_free_pinned_pages;

	pin->prot = prot;
	pin->magic = SCIFEP_MAGIC;
	return pin;
error_free_pinned_pages:
	scif_free(pin, sizeof(*pin));
	return NULL;
}
/**
 * scif_destroy_pinned_pages:
 * @pin: A set of pinned pages.
 *
 * Deallocate resources for pinned pages.
 */
static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
{
	int j;
	int writeable = pin->prot & SCIF_PROT_WRITE;
	int kernel = SCIF_MAP_KERNEL & pin->map_flags;

	for (j = 0; j < pin->nr_pages; j++) {
		if (pin->pages[j] && !kernel) {
			if (writeable)
				SetPageDirty(pin->pages[j]);
			put_page(pin->pages[j]);
		}
	}

	scif_free(pin->pages,
		  pin->nr_pages * sizeof(*pin->pages));
	scif_free(pin, sizeof(*pin));
	return 0;
}
/**
 * scif_create_window:
 * @ep: end point
 * @nr_pages: number of pages
 * @offset: registration offset
 * @temp: true if a temporary window is being created
 *
 * Allocate and prepare a self registration window.
 */
struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
				       s64 offset, bool temp)
{
	struct scif_window *window;

	window = scif_zalloc(sizeof(*window));
	if (!window)
		return NULL;

	window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
	if (!window->dma_addr)
		goto error_free_window;

	window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));
	if (!window->num_pages)
		goto error_free_window;

	window->offset = offset;
	window->ep = (u64)ep;
	window->magic = SCIFEP_MAGIC;
	window->reg_state = OP_IDLE;
	init_waitqueue_head(&window->regwq);
	window->unreg_state = OP_IDLE;
	init_waitqueue_head(&window->unregwq);
	INIT_LIST_HEAD(&window->list);
	window->type = SCIF_WINDOW_SELF;
	window->temp = temp;
	return window;
error_free_window:
	scif_free(window->dma_addr,
		  nr_pages * sizeof(*window->dma_addr));
	scif_free(window, sizeof(*window));
	return NULL;
}
/**
 * scif_destroy_incomplete_window:
 * @ep: end point
 * @window: registration window
 *
 * Deallocate resources for self window.
 */
static void scif_destroy_incomplete_window(struct scif_endpt *ep,
					   struct scif_window *window)
{
	int err;
	int nr_pages = window->nr_pages;
	struct scif_allocmsg *alloc = &window->alloc_handle;
	struct scifmsg msg;

retry:
	/* Wait for a SCIF_ALLOC_GNT/REJ message */
	err = wait_event_timeout(alloc->allocwq,
				 alloc->state != OP_IN_PROGRESS,
				 SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;

	mutex_lock(&ep->rma_info.rma_lock);
	if (alloc->state == OP_COMPLETED) {
		msg.uop = SCIF_FREE_VIRT;
		msg.src = ep->port;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = window->alloc_handle.vaddr;
		msg.payload[2] = (u64)window;
		msg.payload[3] = SCIF_REGISTER;
		_scif_nodeqp_send(ep->remote_dev, &msg);
	}
	mutex_unlock(&ep->rma_info.rma_lock);

	scif_free_window_offset(ep, window, window->offset);
	scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
	scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
	scif_free(window, sizeof(*window));
}
/**
 * scif_unmap_window:
 * @remote_dev: SCIF remote device
 * @window: registration window
 *
 * Delete any DMA mappings created for a registered self window
 */
void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window)
{
	int j;

	if (scif_is_iommu_enabled() && !scifdev_self(remote_dev)) {
		if (window->st) {
			dma_unmap_sg(&remote_dev->sdev->dev,
				     window->st->sgl, window->st->nents,
				     DMA_BIDIRECTIONAL);
			sg_free_table(window->st);
			kfree(window->st);
			window->st = NULL;
		}
	} else {
		for (j = 0; j < window->nr_contig_chunks; j++) {
			if (window->dma_addr[j]) {
				scif_unmap_single(window->dma_addr[j],
						  remote_dev,
						  window->num_pages[j] <<
						  PAGE_SHIFT);
				window->dma_addr[j] = 0x0;
			}
		}
	}
}
static inline struct mm_struct *__scif_acquire_mm(void)
{
	if (scif_ulimit_check)
		return get_task_mm(current);
	return NULL;
}

static inline void __scif_release_mm(struct mm_struct *mm)
{
	if (mm)
		mmput(mm);
}

static inline int
__scif_dec_pinned_vm_lock(struct mm_struct *mm,
			  int nr_pages)
{
	if (!mm || !nr_pages || !scif_ulimit_check)
		return 0;

	atomic64_sub(nr_pages, &mm->pinned_vm);
	return 0;
}

static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
					     int nr_pages)
{
	unsigned long locked, lock_limit;

	if (!mm || !nr_pages || !scif_ulimit_check)
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	locked = atomic64_add_return(nr_pages, &mm->pinned_vm);

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(nr_pages, &mm->pinned_vm);
		dev_err(scif_info.mdev.this_device,
			"locked(%lu) > lock_limit(%lu)\n",
			locked, lock_limit);
		return -ENOMEM;
	}
	return 0;
}
/**
 * scif_destroy_window:
 * @ep: end point
 * @window: registration window
 *
 * Deallocate resources for self window.
 */
int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
{
	int j;
	struct scif_pinned_pages *pinned_pages = window->pinned_pages;
	int nr_pages = window->nr_pages;

	if (!window->temp && window->mm) {
		__scif_dec_pinned_vm_lock(window->mm, window->nr_pages);
		__scif_release_mm(window->mm);
		window->mm = NULL;
	}

	scif_free_window_offset(ep, window, window->offset);
	scif_unmap_window(ep->remote_dev, window);
	/*
	 * Decrement references for this set of pinned pages from
	 * this window.
	 */
	j = atomic_sub_return(1, &pinned_pages->ref_count);
	if (j < 0)
		dev_err(scif_info.mdev.this_device,
			"%s %d incorrect ref count %d\n",
			__func__, __LINE__, j);
	/*
	 * If the ref count for pinned_pages is zero then someone
	 * has already called scif_unpin_pages() for it and we should
	 * destroy the page cache.
	 */
	if (!j)
		scif_destroy_pinned_pages(window->pinned_pages);
	scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
	scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
	scif_free(window, sizeof(*window));
	return 0;
}
/**
 * scif_create_remote_lookup:
 * @remote_dev: SCIF remote device
 * @window: remote window
 *
 * Allocate and prepare lookup entries for the remote
 * end to copy over the physical addresses.
 * Returns 0 on success and appropriate errno on failure.
 */
static int scif_create_remote_lookup(struct scif_dev *remote_dev,
				     struct scif_window *window)
{
	int i, j, err = 0;
	int nr_pages = window->nr_pages;
	bool vmalloc_dma_phys, vmalloc_num_pages;

	/* Map window */
	err = scif_map_single(&window->mapped_offset,
			      window, remote_dev, sizeof(*window));
	if (err)
		goto error_window;

	/* Compute the number of lookup entries. 21 == 2MB Shift */
	window->nr_lookup = ALIGN(nr_pages * PAGE_SIZE,
				  ((2) * 1024 * 1024)) >> 21;

	window->dma_addr_lookup.lookup =
		scif_alloc_coherent(&window->dma_addr_lookup.offset,
				    remote_dev, window->nr_lookup *
				    sizeof(*window->dma_addr_lookup.lookup),
				    GFP_KERNEL | __GFP_ZERO);
	if (!window->dma_addr_lookup.lookup) {
		err = -ENOMEM;
		goto error_window;
	}

	window->num_pages_lookup.lookup =
		scif_alloc_coherent(&window->num_pages_lookup.offset,
				    remote_dev, window->nr_lookup *
				    sizeof(*window->num_pages_lookup.lookup),
				    GFP_KERNEL | __GFP_ZERO);
	if (!window->num_pages_lookup.lookup) {
		err = -ENOMEM;
		goto error_window;
	}

	vmalloc_dma_phys = is_vmalloc_addr(&window->dma_addr[0]);
	vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]);

	/* Now map each of the pages containing physical addresses */
	for (i = 0, j = 0; i < nr_pages; i += SCIF_NR_ADDR_IN_PAGE, j++) {
		err = scif_map_page(&window->dma_addr_lookup.lookup[j],
				    vmalloc_dma_phys ?
				    vmalloc_to_page(&window->dma_addr[i]) :
				    virt_to_page(&window->dma_addr[i]),
				    remote_dev);
		if (err)
			goto error_window;
		err = scif_map_page(&window->num_pages_lookup.lookup[j],
				    vmalloc_num_pages ?
				    vmalloc_to_page(&window->num_pages[i]) :
				    virt_to_page(&window->num_pages[i]),
				    remote_dev);
		if (err)
			goto error_window;
	}
	return 0;
error_window:
	return err;
}
/**
 * scif_destroy_remote_lookup:
 * @remote_dev: SCIF remote device
 * @window: remote window
 *
 * Destroy lookup entries used for the remote
 * end to copy over the physical addresses.
 */
static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
				       struct scif_window *window)
{
	int i, j;

	if (window->nr_lookup) {
		struct scif_rma_lookup *lup = &window->dma_addr_lookup;
		struct scif_rma_lookup *npup = &window->num_pages_lookup;

		for (i = 0, j = 0; i < window->nr_pages;
			i += SCIF_NR_ADDR_IN_PAGE, j++) {
			if (lup->lookup && lup->lookup[j])
				scif_unmap_single(lup->lookup[j],
						  remote_dev,
						  PAGE_SIZE);
			if (npup->lookup && npup->lookup[j])
				scif_unmap_single(npup->lookup[j],
						  remote_dev,
						  PAGE_SIZE);
		}
		if (lup->lookup)
			scif_free_coherent(lup->lookup, lup->offset,
					   remote_dev, window->nr_lookup *
					   sizeof(*lup->lookup));
		if (npup->lookup)
			scif_free_coherent(npup->lookup, npup->offset,
					   remote_dev, window->nr_lookup *
					   sizeof(*npup->lookup));
		if (window->mapped_offset)
			scif_unmap_single(window->mapped_offset,
					  remote_dev, sizeof(*window));
		window->nr_lookup = 0;
	}
}
/**
 * scif_create_remote_window:
 * @scifdev: SCIF device
 * @nr_pages: number of pages in window
 *
 * Allocate and prepare a remote registration window.
 */
static struct scif_window *
scif_create_remote_window(struct scif_dev *scifdev, int nr_pages)
{
	struct scif_window *window;

	window = scif_zalloc(sizeof(*window));
	if (!window)
		return NULL;

	window->magic = SCIFEP_MAGIC;
	window->nr_pages = nr_pages;

	window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
	if (!window->dma_addr)
		goto error_window;

	window->num_pages = scif_zalloc(nr_pages *
					sizeof(*window->num_pages));
	if (!window->num_pages)
		goto error_window;

	if (scif_create_remote_lookup(scifdev, window))
		goto error_window;

	window->type = SCIF_WINDOW_PEER;
	window->unreg_state = OP_IDLE;
	INIT_LIST_HEAD(&window->list);
	return window;
error_window:
	scif_destroy_remote_window(window);
	return NULL;
}

/**
 * scif_destroy_remote_window:
 * @window: remote registration window
 *
 * Deallocate resources for remote window.
 */
void
scif_destroy_remote_window(struct scif_window *window)
{
	scif_free(window->dma_addr, window->nr_pages *
		  sizeof(*window->dma_addr));
	scif_free(window->num_pages, window->nr_pages *
		  sizeof(*window->num_pages));
	scif_free(window, sizeof(*window));
}
/**
 * scif_iommu_map: create DMA mappings if the IOMMU is enabled
 * @remote_dev: SCIF remote device
 * @window: remote registration window
 *
 * Map the physical pages using dma_map_sg(..) and then detect the number
 * of contiguous DMA mappings allocated
 */
static int scif_iommu_map(struct scif_dev *remote_dev,
			  struct scif_window *window)
{
	struct scatterlist *sg;
	int i, err;
	scif_pinned_pages_t pin = window->pinned_pages;

	window->st = kzalloc(sizeof(*window->st), GFP_KERNEL);
	if (!window->st)
		return -ENOMEM;

	err = sg_alloc_table(window->st, window->nr_pages, GFP_KERNEL);
	if (err)
		return err;

	for_each_sg(window->st->sgl, sg, window->st->nents, i)
		sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0);

	err = dma_map_sg(&remote_dev->sdev->dev, window->st->sgl,
			 window->st->nents, DMA_BIDIRECTIONAL);
	if (!err)
		return -ENOMEM;
	/* Detect contiguous ranges of DMA mappings */
	sg = window->st->sgl;
	for (i = 0; sg; i++) {
		dma_addr_t last_da;

		window->dma_addr[i] = sg_dma_address(sg);
		window->num_pages[i] = sg_dma_len(sg) >> PAGE_SHIFT;
		last_da = sg_dma_address(sg) + sg_dma_len(sg);
		while ((sg = sg_next(sg)) && sg_dma_address(sg) == last_da) {
			window->num_pages[i] +=
				(sg_dma_len(sg) >> PAGE_SHIFT);
			last_da = window->dma_addr[i] +
				(window->num_pages[i] << PAGE_SHIFT);
		}
		window->nr_contig_chunks++;
	}
	return 0;
}
/**
 * scif_map_window:
 * @remote_dev: SCIF remote device
 * @window: self registration window
 *
 * Map pages of a window into the aperture/PCI.
 * Also determine addresses required for DMA.
 */
int
scif_map_window(struct scif_dev *remote_dev, struct scif_window *window)
{
	int i, j, k, err = 0, nr_contig_pages;
	scif_pinned_pages_t pin;
	phys_addr_t phys_prev, phys_curr;

	pin = window->pinned_pages;

	if (intel_iommu_enabled && !scifdev_self(remote_dev))
		return scif_iommu_map(remote_dev, window);

	for (i = 0, j = 0; i < window->nr_pages; i += nr_contig_pages, j++) {
		phys_prev = page_to_phys(pin->pages[i]);
		nr_contig_pages = 1;

		/* Detect physically contiguous chunks */
		for (k = i + 1; k < window->nr_pages; k++) {
			phys_curr = page_to_phys(pin->pages[k]);
			if (phys_curr != (phys_prev + PAGE_SIZE))
				break;
			phys_prev = phys_curr;
			nr_contig_pages++;
		}
		window->num_pages[j] = nr_contig_pages;
		window->nr_contig_chunks++;
		if (scif_is_mgmt_node()) {
			/*
			 * Management node has to deal with SMPT on X100 and
			 * hence the DMA mapping is required
			 */
			err = scif_map_single(&window->dma_addr[j],
					      phys_to_virt(page_to_phys(
							   pin->pages[i])),
					      remote_dev,
					      nr_contig_pages << PAGE_SHIFT);
			if (err)
				return err;
		} else {
			window->dma_addr[j] = page_to_phys(pin->pages[i]);
		}
	}
	return err;
}
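/*
 * Illustrative walk-through of the chunk detection above (not driver code):
 * suppose pin->pages[] maps to physical frames 0x1000, 0x2000, 0x3000,
 * 0x8000, 0x9000. The inner loop folds {0x1000, 0x2000, 0x3000} into one
 * chunk (num_pages[0] = 3, dma_addr[0] = 0x1000) and {0x8000, 0x9000} into
 * a second (num_pages[1] = 2, dma_addr[1] = 0x8000), so nr_contig_chunks
 * ends up as 2 and later DMA programming needs two descriptors instead of
 * five.
 */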
/**
 * scif_send_scif_unregister:
 * @ep: end point
 * @window: self registration window
 *
 * Send a SCIF_UNREGISTER message.
 */
static int scif_send_scif_unregister(struct scif_endpt *ep,
				     struct scif_window *window)
{
	struct scifmsg msg;

	msg.uop = SCIF_UNREGISTER;
	msg.src = ep->port;
	msg.payload[0] = window->alloc_handle.vaddr;
	msg.payload[1] = (u64)window;
	return scif_nodeqp_send(ep->remote_dev, &msg);
}
/**
 * scif_unregister_window:
 * @window: self registration window
 *
 * Send an unregistration request and wait for a response.
 */
int scif_unregister_window(struct scif_window *window)
{
	int err = 0;
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;
	bool send_msg = false;

	switch (window->unreg_state) {
	case OP_IDLE:
	{
		window->unreg_state = OP_IN_PROGRESS;
		send_msg = true;
	}
		/* fall through */
	case OP_IN_PROGRESS:
	{
		scif_get_window(window, 1);
		mutex_unlock(&ep->rma_info.rma_lock);
		if (send_msg) {
			err = scif_send_scif_unregister(ep, window);
			if (err) {
				window->unreg_state = OP_COMPLETED;
				goto done;
			}
		} else {
			/* Return ENXIO since unregistration is in progress */
			mutex_lock(&ep->rma_info.rma_lock);
			return -ENXIO;
		}
retry:
		/* Wait for a SCIF_UNREGISTER_(N)ACK message */
		err = wait_event_timeout(window->unregwq,
					 window->unreg_state != OP_IN_PROGRESS,
					 SCIF_NODE_ALIVE_TIMEOUT);
		if (!err && scifdev_alive(ep))
			goto retry;
		if (!err) {
			err = -ENODEV;
			window->unreg_state = OP_COMPLETED;
			dev_err(scif_info.mdev.this_device,
				"%s %d err %d\n", __func__, __LINE__, err);
		}
		if (err > 0)
			err = 0;
done:
		mutex_lock(&ep->rma_info.rma_lock);
		scif_put_window(window, 1);
		break;
	}
	case OP_FAILED:
	{
		if (!scifdev_alive(ep)) {
			err = -ENODEV;
			window->unreg_state = OP_COMPLETED;
		}
		break;
	}
	case OP_COMPLETED:
		break;
	default:
		err = -ENODEV;
	}

	if (window->unreg_state == OP_COMPLETED && window->ref_count)
		scif_put_window(window, window->nr_pages);

	if (!window->ref_count) {
		atomic_inc(&ep->rma_info.tw_refcount);
		list_del_init(&window->list);
		scif_free_window_offset(ep, window, window->offset);
		mutex_unlock(&ep->rma_info.rma_lock);
		if ((!!(window->pinned_pages->map_flags & SCIF_MAP_KERNEL)) &&
		    scifdev_alive(ep)) {
			scif_drain_dma_intr(ep->remote_dev->sdev,
					    ep->rma_info.dma_chan);
		} else {
			if (!__scif_dec_pinned_vm_lock(window->mm,
						       window->nr_pages)) {
				__scif_release_mm(window->mm);
				window->mm = NULL;
			}
		}
		scif_queue_for_cleanup(window, &scif_info.rma);
		mutex_lock(&ep->rma_info.rma_lock);
	}
	return err;
}
/**
 * scif_send_alloc_request:
 * @ep: end point
 * @window: self registration window
 *
 * Send a remote window allocation request
 */
static int scif_send_alloc_request(struct scif_endpt *ep,
				   struct scif_window *window)
{
	struct scifmsg msg;
	struct scif_allocmsg *alloc = &window->alloc_handle;

	/* Set up the Alloc Handle */
	alloc->state = OP_IN_PROGRESS;
	init_waitqueue_head(&alloc->allocwq);

	/* Send out an allocation request */
	msg.uop = SCIF_ALLOC_REQ;
	msg.payload[1] = window->nr_pages;
	msg.payload[2] = (u64)&window->alloc_handle;
	return _scif_nodeqp_send(ep->remote_dev, &msg);
}
/**
 * scif_prep_remote_window:
 * @ep: end point
 * @window: self registration window
 *
 * Send a remote window allocation request, wait for an allocation response,
 * and prepares the remote window by copying over the page lists
 */
static int scif_prep_remote_window(struct scif_endpt *ep,
				   struct scif_window *window)
{
	struct scifmsg msg;
	struct scif_window *remote_window;
	struct scif_allocmsg *alloc = &window->alloc_handle;
	dma_addr_t *dma_phys_lookup, *tmp, *num_pages_lookup, *tmp1;
	int i = 0, j = 0;
	int nr_contig_chunks, loop_nr_contig_chunks;
	int remaining_nr_contig_chunks, nr_lookup;
	int err, map_err;

	map_err = scif_map_window(ep->remote_dev, window);
	if (map_err)
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d map_err %d\n", __func__, __LINE__, map_err);
	remaining_nr_contig_chunks = window->nr_contig_chunks;
	nr_contig_chunks = window->nr_contig_chunks;
retry:
	/* Wait for a SCIF_ALLOC_GNT/REJ message */
	err = wait_event_timeout(alloc->allocwq,
				 alloc->state != OP_IN_PROGRESS,
				 SCIF_NODE_ALIVE_TIMEOUT);
	mutex_lock(&ep->rma_info.rma_lock);
	/* Synchronize with the thread waking up allocwq */
	mutex_unlock(&ep->rma_info.rma_lock);
	if (!err && scifdev_alive(ep))
		goto retry;

	if (!err)
		err = -ENODEV;

	if (err > 0)
		err = 0;
	else
		return err;

	/* Bail out. The remote end rejected this request */
	if (alloc->state == OP_FAILED)
		return -ENOMEM;

	if (map_err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, map_err);
		msg.uop = SCIF_FREE_VIRT;
		msg.src = ep->port;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = window->alloc_handle.vaddr;
		msg.payload[2] = (u64)window;
		msg.payload[3] = SCIF_REGISTER;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = _scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		return err;
	}

	remote_window = scif_ioremap(alloc->phys_addr, sizeof(*window),
				     ep->remote_dev);

	/* Compute the number of lookup entries. 21 == 2MB Shift */
	nr_lookup = ALIGN(nr_contig_chunks, SCIF_NR_ADDR_IN_PAGE)
			  >> ilog2(SCIF_NR_ADDR_IN_PAGE);

	dma_phys_lookup =
		scif_ioremap(remote_window->dma_addr_lookup.offset,
			     nr_lookup *
			     sizeof(*remote_window->dma_addr_lookup.lookup),
			     ep->remote_dev);
	num_pages_lookup =
		scif_ioremap(remote_window->num_pages_lookup.offset,
			     nr_lookup *
			     sizeof(*remote_window->num_pages_lookup.lookup),
			     ep->remote_dev);

	while (remaining_nr_contig_chunks) {
		loop_nr_contig_chunks = min_t(int, remaining_nr_contig_chunks,
					      (int)SCIF_NR_ADDR_IN_PAGE);
		/* #1/2 - Copy physical addresses over to the remote side */

		/* #2/2 - Copy DMA addresses (addresses that are fed into the
		 * DMA engine) We transfer bus addresses which are then
		 * converted into a MIC physical address on the remote
		 * side if it is a MIC, if the remote node is a mgmt node we
		 * transfer the MIC physical address
		 */
		tmp = scif_ioremap(dma_phys_lookup[j],
				   loop_nr_contig_chunks *
				   sizeof(*window->dma_addr),
				   ep->remote_dev);
		tmp1 = scif_ioremap(num_pages_lookup[j],
				    loop_nr_contig_chunks *
				    sizeof(*window->num_pages),
				    ep->remote_dev);
		if (scif_is_mgmt_node()) {
			memcpy_toio((void __force __iomem *)tmp,
				    &window->dma_addr[i], loop_nr_contig_chunks
				    * sizeof(*window->dma_addr));
			memcpy_toio((void __force __iomem *)tmp1,
				    &window->num_pages[i], loop_nr_contig_chunks
				    * sizeof(*window->num_pages));
		} else {
			if (scifdev_is_p2p(ep->remote_dev)) {
				/*
				 * add remote node's base address for this node
				 * to convert it into a MIC address
				 */
				int m;
				dma_addr_t dma_addr;

				for (m = 0; m < loop_nr_contig_chunks; m++) {
					dma_addr = window->dma_addr[i + m] +
						ep->remote_dev->base_addr;
					writeq(dma_addr,
					       (void __force __iomem *)&tmp[m]);
				}
				memcpy_toio((void __force __iomem *)tmp1,
					    &window->num_pages[i],
					    loop_nr_contig_chunks
					    * sizeof(*window->num_pages));
			} else {
				/* Mgmt node or loopback - transfer DMA
				 * addresses as is, this is the same as a
				 * MIC physical address (we use the dma_addr
				 * and not the phys_addr array since the
				 * phys_addr is only setup if there is a mmap()
				 * request from the mgmt node)
				 */
				memcpy_toio((void __force __iomem *)tmp,
					    &window->dma_addr[i],
					    loop_nr_contig_chunks *
					    sizeof(*window->dma_addr));
				memcpy_toio((void __force __iomem *)tmp1,
					    &window->num_pages[i],
					    loop_nr_contig_chunks *
					    sizeof(*window->num_pages));
			}
		}
		remaining_nr_contig_chunks -= loop_nr_contig_chunks;
		i += loop_nr_contig_chunks;
		j++;
		scif_iounmap(tmp, loop_nr_contig_chunks *
			     sizeof(*window->dma_addr), ep->remote_dev);
		scif_iounmap(tmp1, loop_nr_contig_chunks *
			     sizeof(*window->num_pages), ep->remote_dev);
	}

	/* Prepare the remote window for the peer */
	remote_window->peer_window = (u64)window;
	remote_window->offset = window->offset;
	remote_window->prot = window->prot;
	remote_window->nr_contig_chunks = nr_contig_chunks;
	remote_window->ep = ep->remote_ep;
	scif_iounmap(num_pages_lookup,
		     nr_lookup *
		     sizeof(*remote_window->num_pages_lookup.lookup),
		     ep->remote_dev);
	scif_iounmap(dma_phys_lookup,
		     nr_lookup *
		     sizeof(*remote_window->dma_addr_lookup.lookup),
		     ep->remote_dev);
	scif_iounmap(remote_window, sizeof(*remote_window), ep->remote_dev);
	window->peer_window = alloc->vaddr;
	return err;
}
/**
 * scif_send_scif_register:
 * @ep: end point
 * @window: self registration window
 *
 * Send a SCIF_REGISTER message if EP is connected and wait for a
 * SCIF_REGISTER_(N)ACK message else send a SCIF_FREE_VIRT
 * message so that the peer can free its remote window allocated earlier.
 */
static int scif_send_scif_register(struct scif_endpt *ep,
				   struct scif_window *window)
{
	int err = 0;
	struct scifmsg msg;

	msg.src = ep->port;
	msg.payload[0] = ep->remote_ep;
	msg.payload[1] = window->alloc_handle.vaddr;
	msg.payload[2] = (u64)window;
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED) {
		msg.uop = SCIF_REGISTER;
		window->reg_state = OP_IN_PROGRESS;
		err = _scif_nodeqp_send(ep->remote_dev, &msg);
		spin_unlock(&ep->lock);
		if (!err) {
retry:
			/* Wait for a SCIF_REGISTER_(N)ACK message */
			err = wait_event_timeout(window->regwq,
						 window->reg_state !=
						 OP_IN_PROGRESS,
						 SCIF_NODE_ALIVE_TIMEOUT);
			if (!err && scifdev_alive(ep))
				goto retry;
			err = !err ? -ENODEV : 0;
			if (window->reg_state == OP_FAILED)
				err = -ENOTCONN;
		}
	} else {
		msg.uop = SCIF_FREE_VIRT;
		msg.payload[3] = SCIF_REGISTER;
		err = _scif_nodeqp_send(ep->remote_dev, &msg);
		spin_unlock(&ep->lock);
		if (!err)
			err = -ENOTCONN;
	}
	return err;
}
/**
 * scif_get_window_offset:
 * @ep: end point descriptor
 * @flags: map flags
 * @offset: offset hint
 * @num_pages: number of pages
 * @out_offset: computed offset returned by reference.
 *
 * Compute/Claim a new offset for this EP.
 */
int scif_get_window_offset(struct scif_endpt *ep, int flags, s64 offset,
			   int num_pages, s64 *out_offset)
{
	s64 page_index;
	struct iova *iova_ptr;
	int err = 0;

	if (flags & SCIF_MAP_FIXED) {
		page_index = SCIF_IOVA_PFN(offset);
		iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index,
					page_index + num_pages - 1);
		if (!iova_ptr)
			err = -EADDRINUSE;
	} else {
		iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
				      SCIF_DMA_63BIT_PFN - 1, 0);
		if (!iova_ptr)
			err = -ENOMEM;
	}
	if (!err)
		*out_offset = (iova_ptr->pfn_lo) << PAGE_SHIFT;
	return err;
}
/**
 * scif_free_window_offset:
 * @ep: end point descriptor
 * @window: registration window
 * @offset: Offset to be freed
 *
 * Free offset for this EP. The caller is expected to hold
 * the RMA mutex before calling this API.
 */
void scif_free_window_offset(struct scif_endpt *ep,
			     struct scif_window *window, s64 offset)
{
	if ((window && !window->offset_freed) || !window) {
		free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT);
		if (window)
			window->offset_freed = true;
	}
}
/**
 * scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
 * @msg:        Interrupt message
 *
 * Remote side is requesting a memory allocation.
 */
void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg)
{
	int err;
	struct scif_window *window = NULL;
	int nr_pages = msg->payload[1];

	window = scif_create_remote_window(scifdev, nr_pages);
	if (!window) {
		err = -ENOMEM;
		goto error;
	}

	/* The peer's allocation request is granted */
	msg->uop = SCIF_ALLOC_GNT;
	msg->payload[0] = (u64)window;
	msg->payload[1] = window->mapped_offset;
	err = scif_nodeqp_send(scifdev, msg);
	if (err)
		scif_destroy_remote_window(window);
	return;
error:
	/* The peer's allocation request is rejected */
	dev_err(&scifdev->sdev->dev,
		"%s %d error %d alloc_ptr %p nr_pages 0x%x\n",
		__func__, __LINE__, err, window, nr_pages);
	msg->uop = SCIF_ALLOC_REJ;
	scif_nodeqp_send(scifdev, msg);
}
/**
 * scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
 * @msg:        Interrupt message
 *
 * Remote side responded to a memory allocation.
 */
void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_allocmsg *handle = (struct scif_allocmsg *)msg->payload[2];
	struct scif_window *window = container_of(handle, struct scif_window,
						  alloc_handle);
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	handle->vaddr = msg->payload[0];
	handle->phys_addr = msg->payload[1];
	if (msg->uop == SCIF_ALLOC_GNT)
		handle->state = OP_COMPLETED;
	else
		handle->state = OP_FAILED;
	wake_up(&handle->allocwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}
/**
 * scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
 * @msg:        Interrupt message
 *
 * Free up memory kmalloc'd earlier.
 */
void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window = (struct scif_window *)msg->payload[1];

	scif_destroy_remote_window(window);
}
static void
scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
{
	int j;
	struct scif_hw_dev *sdev = dev->sdev;
	phys_addr_t apt_base = 0;

	/*
	 * Add the aperture base if the DMA address is not card relative
	 * since the DMA addresses need to be an offset into the bar
	 */
	if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
	    sdev->aper && !sdev->card_rel_da)
		apt_base = sdev->aper->pa;
	else
		return;

	for (j = 0; j < window->nr_contig_chunks; j++) {
		if (window->num_pages[j])
			window->dma_addr[j] += apt_base;
		else
			break;
	}
}
/**
 * scif_recv_reg: Respond to SCIF_REGISTER interrupt message
 * @msg:        Interrupt message
 *
 * Update remote window list with a new registered window.
 */
void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_window *window =
		(struct scif_window *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED) {
		msg->uop = SCIF_REGISTER_ACK;
		scif_nodeqp_send(ep->remote_dev, msg);
		scif_fixup_aper_base(ep->remote_dev, window);
		/* No further failures expected. Insert new window */
		scif_insert_window(window, &ep->rma_info.remote_reg_list);
	} else {
		msg->uop = SCIF_REGISTER_NACK;
		scif_nodeqp_send(ep->remote_dev, msg);
	}
	spin_unlock(&ep->lock);
	mutex_unlock(&ep->rma_info.rma_lock);
	/* free up any lookup resources now that page lists are transferred */
	scif_destroy_remote_lookup(ep->remote_dev, window);
	/*
	 * We could not insert the window but we need to
	 * destroy the window.
	 */
	if (msg->uop == SCIF_REGISTER_NACK)
		scif_destroy_remote_window(window);
}
/**
 * scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
 * @msg:        Interrupt message
 *
 * Remove window from remote registration list;
 */
void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_rma_req req;
	struct scif_window *window = NULL;
	struct scif_window *recv_window =
		(struct scif_window *)msg->payload[0];
	struct scif_endpt *ep;
	int del_window = 0;

	ep = (struct scif_endpt *)recv_window->ep;
	req.out_window = &window;
	req.offset = recv_window->offset;
	req.prot = 0;
	req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
	req.type = SCIF_WINDOW_FULL;
	req.head = &ep->rma_info.remote_reg_list;
	msg->payload[0] = ep->remote_ep;

	mutex_lock(&ep->rma_info.rma_lock);
	/* Does a valid window exist? */
	if (scif_query_window(&req)) {
		dev_err(&scifdev->sdev->dev,
			"%s %d -ENXIO\n", __func__, __LINE__);
		msg->uop = SCIF_UNREGISTER_ACK;
		goto error;
	}
	if (window) {
		if (window->ref_count)
			scif_put_window(window, window->nr_pages);
		else
			dev_err(&scifdev->sdev->dev,
				"%s %d ref count should be +ve\n",
				__func__, __LINE__);
		window->unreg_state = OP_COMPLETED;
		if (!window->ref_count) {
			msg->uop = SCIF_UNREGISTER_ACK;
			atomic_inc(&ep->rma_info.tw_refcount);
			ep->rma_info.async_list_del = 1;
			list_del_init(&window->list);
			del_window = 1;
		} else {
			/* NACK! There are valid references to this window */
			msg->uop = SCIF_UNREGISTER_NACK;
		}
	} else {
		/* The window did not make its way to the list at all. ACK */
		msg->uop = SCIF_UNREGISTER_ACK;
		scif_destroy_remote_window(recv_window);
	}
error:
	mutex_unlock(&ep->rma_info.rma_lock);
	if (del_window)
		scif_drain_dma_intr(ep->remote_dev->sdev,
				    ep->rma_info.dma_chan);
	scif_nodeqp_send(ep->remote_dev, msg);
	if (del_window)
		scif_queue_for_cleanup(window, &scif_info.rma);
}
/**
 * scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to complete registration.
 */
void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[2];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->reg_state = OP_COMPLETED;
	wake_up(&window->regwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}

/**
 * scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to inform it that registration
 * cannot be completed.
 */
void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[2];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->reg_state = OP_FAILED;
	wake_up(&window->regwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}

/**
 * scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to complete unregistration.
 */
void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[1];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->unreg_state = OP_COMPLETED;
	wake_up(&window->unregwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}

/**
 * scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
 * @msg:        Interrupt message
 *
 * Wake up the window waiting to inform it that unregistration
 * cannot be completed immediately.
 */
void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_window *window =
		(struct scif_window *)msg->payload[1];
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;

	mutex_lock(&ep->rma_info.rma_lock);
	window->unreg_state = OP_FAILED;
	wake_up(&window->unregwq);
	mutex_unlock(&ep->rma_info.rma_lock);
}
int __scif_pin_pages(void *addr, size_t len, int *out_prot,
		     int map_flags, scif_pinned_pages_t *pages)
{
	struct scif_pinned_pages *pinned_pages;
	int nr_pages, err = 0, i;
	bool vmalloc_addr = false;
	bool try_upgrade = false;
	int prot = *out_prot;
	int ulimit = 0;
	struct mm_struct *mm = NULL;

	/* Unsupported flags */
	if (map_flags & ~(SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT))
		return -EINVAL;
	ulimit = !!(map_flags & SCIF_MAP_ULIMIT);

	/* Unsupported protection requested */
	if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
		return -EINVAL;

	/* addr/len must be page aligned. len should be non zero */
	if (!len ||
	    (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
	    (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
		return -EINVAL;

	nr_pages = len >> PAGE_SHIFT;

	/* Allocate a set of pinned pages */
	pinned_pages = scif_create_pinned_pages(nr_pages, prot);
	if (!pinned_pages)
		return -ENOMEM;

	if (map_flags & SCIF_MAP_KERNEL) {
		if (is_vmalloc_addr(addr))
			vmalloc_addr = true;

		for (i = 0; i < nr_pages; i++) {
			if (vmalloc_addr)
				pinned_pages->pages[i] =
					vmalloc_to_page(addr + (i * PAGE_SIZE));
			else
				pinned_pages->pages[i] =
					virt_to_page(addr + (i * PAGE_SIZE));
		}
		pinned_pages->nr_pages = nr_pages;
		pinned_pages->map_flags = SCIF_MAP_KERNEL;
	} else {
		/*
		 * SCIF supports registration caching. If a registration has
		 * been requested with read only permissions, then we try
		 * to pin the pages with RW permissions so that a subsequent
		 * transfer with RW permission can hit the cache instead of
		 * invalidating it. If the upgrade fails with RW then we
		 * revert back to R permission and retry
		 */
		if (prot == SCIF_PROT_READ)
			try_upgrade = true;
		prot |= SCIF_PROT_WRITE;
retry:
		mm = current->mm;
		if (ulimit) {
			err = __scif_check_inc_pinned_vm(mm, nr_pages);
			if (err) {
				pinned_pages->nr_pages = 0;
				goto error_unmap;
			}
		}

		pinned_pages->nr_pages = get_user_pages_fast(
				(u64)addr,
				nr_pages,
				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
				pinned_pages->pages);
		if (nr_pages != pinned_pages->nr_pages) {
			if (try_upgrade) {
				if (ulimit)
					__scif_dec_pinned_vm_lock(mm, nr_pages);
				/* Roll back any pinned pages */
				for (i = 0; i < pinned_pages->nr_pages; i++) {
					if (pinned_pages->pages[i])
						put_page(
						pinned_pages->pages[i]);
				}
				prot &= ~SCIF_PROT_WRITE;
				try_upgrade = false;
				goto retry;
			}
		}
		pinned_pages->map_flags = 0;
	}

	if (pinned_pages->nr_pages < nr_pages) {
		err = -EFAULT;
		pinned_pages->nr_pages = nr_pages;
		goto dec_pinned;
	}

	*out_prot = prot;
	atomic_set(&pinned_pages->ref_count, 1);
	*pages = pinned_pages;
	return err;
dec_pinned:
	if (ulimit)
		__scif_dec_pinned_vm_lock(mm, nr_pages);
	/* Something went wrong! Rollback */
error_unmap:
	pinned_pages->nr_pages = nr_pages;
	scif_destroy_pinned_pages(pinned_pages);
	*pages = NULL;
	dev_dbg(scif_info.mdev.this_device,
		"%s %d err %d len 0x%lx\n", __func__, __LINE__, err, len);
	return err;
}

int scif_pin_pages(void *addr, size_t len, int prot,
		   int map_flags, scif_pinned_pages_t *pages)
{
	return __scif_pin_pages(addr, len, &prot, map_flags, pages);
}
EXPORT_SYMBOL_GPL(scif_pin_pages);
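/*
 * Minimal usage sketch for the exported pinning API (illustrative only; the
 * buffer, its size and the error handling are assumptions, not taken from
 * this file):
 *
 *	scif_pinned_pages_t pp;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, 2);	// 4 pages
 *	int err = scif_pin_pages(buf, 0x4000,
 *				 SCIF_PROT_READ | SCIF_PROT_WRITE,
 *				 SCIF_MAP_KERNEL, &pp);
 *	if (!err) {
 *		// pp can now be handed to scif_register_pinned_pages()
 *		scif_unpin_pages(pp);
 *	}
 *	free_pages((unsigned long)buf, 2);
 *
 * addr and len must be page aligned and len non zero, mirroring the checks
 * in __scif_pin_pages() above.
 */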
int scif_unpin_pages(scif_pinned_pages_t pinned_pages)
{
	int err = 0, ret;

	if (!pinned_pages || SCIFEP_MAGIC != pinned_pages->magic)
		return -EINVAL;

	ret = atomic_sub_return(1, &pinned_pages->ref_count);
	if (ret < 0) {
		dev_err(scif_info.mdev.this_device,
			"%s %d scif_unpin_pages called without pinning? rc %d\n",
			__func__, __LINE__, ret);
		return -EINVAL;
	}
	/*
	 * Destroy the window if the ref count for this set of pinned
	 * pages has dropped to zero. If it is positive then there is
	 * a valid registered window which is backed by these pages and
	 * it will be destroyed once all such windows are unregistered.
	 */
	if (!ret)
		err = scif_destroy_pinned_pages(pinned_pages);

	return err;
}
EXPORT_SYMBOL_GPL(scif_unpin_pages);
static void
scif_insert_local_window(struct scif_window *window, struct scif_endpt *ep)
{
	mutex_lock(&ep->rma_info.rma_lock);
	scif_insert_window(window, &ep->rma_info.reg_list);
	mutex_unlock(&ep->rma_info.rma_lock);
}
off_t
scif_register_pinned_pages(scif_epd_t epd,
			   scif_pinned_pages_t pinned_pages,
			   off_t offset, int map_flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	s64 computed_offset;
	struct scif_window *window;
	int err;
	size_t len;
	struct device *spdev;

	/* Unsupported flags */
	if (map_flags & ~SCIF_MAP_FIXED)
		return -EINVAL;

	len = pinned_pages->nr_pages << PAGE_SHIFT;

	/*
	 * Offset is not page aligned/negative or offset+len
	 * wraps around with SCIF_MAP_FIXED.
	 */
	if ((map_flags & SCIF_MAP_FIXED) &&
	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
	    (offset < 0) ||
	    (len > LONG_MAX - offset)))
		return -EINVAL;

	err = scif_verify_epd(ep);
	if (err)
		return err;
	/*
	 * It is an error to pass pinned_pages to scif_register_pinned_pages()
	 * after calling scif_unpin_pages().
	 */
	if (!atomic_add_unless(&pinned_pages->ref_count, 1, 0))
		return -EINVAL;

	/* Compute the offset for this registration */
	err = scif_get_window_offset(ep, map_flags, offset,
				     len, &computed_offset);
	if (err) {
		atomic_sub(1, &pinned_pages->ref_count);
		return err;
	}

	/* Allocate and prepare self registration window */
	window = scif_create_window(ep, pinned_pages->nr_pages,
				    computed_offset, false);
	if (!window) {
		atomic_sub(1, &pinned_pages->ref_count);
		scif_free_window_offset(ep, NULL, computed_offset);
		return -ENOMEM;
	}

	window->pinned_pages = pinned_pages;
	window->nr_pages = pinned_pages->nr_pages;
	window->prot = pinned_pages->prot;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		scif_destroy_window(ep, window);
		return err;
	}
	err = scif_send_alloc_request(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	/* Prepare the remote registration window */
	err = scif_prep_remote_window(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	/* Tell the peer about the new window */
	err = scif_send_scif_register(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	scif_put_peer_dev(spdev);
	/* No further failures expected. Insert new window */
	scif_insert_local_window(window, ep);
	return computed_offset;
error_unmap:
	scif_destroy_window(ep, window);
	scif_put_peer_dev(spdev);
	dev_err(&ep->remote_dev->sdev->dev,
		"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_register_pinned_pages);
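/*
 * Illustrative follow-on to the pinning sketch above (the endpoint 'epd' and
 * the 16 KB length are assumptions, not taken from this file): a pinned set
 * of pages can be exposed to a connected peer at a registered offset:
 *
 *	off_t off = scif_register_pinned_pages(epd, pp, 0, 0);
 *	if (off < 0)
 *		return off;		// negative errno on failure
 *	// ... RMA transfers against 'off' ...
 *	scif_unregister(epd, off, 0x4000);
 *
 * With map_flags == 0 the driver chooses the offset via
 * scif_get_window_offset(); SCIF_MAP_FIXED turns the 'offset' argument from
 * a hint into a requirement.
 */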
off_t
scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
	      int prot, int map_flags)
{
	scif_pinned_pages_t pinned_pages;
	off_t err;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	s64 computed_offset;
	struct scif_window *window;
	struct mm_struct *mm = NULL;
	struct device *spdev;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI register: ep %p addr %p len 0x%lx offset 0x%lx prot 0x%x map_flags 0x%x\n",
		epd, addr, len, offset, prot, map_flags);
	/* Unsupported flags */
	if (map_flags & ~(SCIF_MAP_FIXED | SCIF_MAP_KERNEL))
		return -EINVAL;

	/*
	 * Offset is not page aligned/negative or offset+len
	 * wraps around with SCIF_MAP_FIXED.
	 */
	if ((map_flags & SCIF_MAP_FIXED) &&
	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
	    (offset < 0) ||
	    (len > LONG_MAX - offset)))
		return -EINVAL;

	/* Unsupported protection requested */
	if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
		return -EINVAL;

	/* addr/len must be page aligned. len should be non zero */
	if (!len || (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
	    (ALIGN(len, PAGE_SIZE) != len))
		return -EINVAL;

	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Compute the offset for this registration */
	err = scif_get_window_offset(ep, map_flags, offset,
				     len >> PAGE_SHIFT, &computed_offset);
	if (err)
		return err;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		scif_free_window_offset(ep, NULL, computed_offset);
		return err;
	}
	/* Allocate and prepare self registration window */
	window = scif_create_window(ep, len >> PAGE_SHIFT,
				    computed_offset, false);
	if (!window) {
		scif_free_window_offset(ep, NULL, computed_offset);
		scif_put_peer_dev(spdev);
		return -ENOMEM;
	}

	window->nr_pages = len >> PAGE_SHIFT;

	err = scif_send_alloc_request(ep, window);
	if (err) {
		scif_destroy_incomplete_window(ep, window);
		scif_put_peer_dev(spdev);
		return err;
	}

	if (!(map_flags & SCIF_MAP_KERNEL)) {
		mm = __scif_acquire_mm();
		map_flags |= SCIF_MAP_ULIMIT;
	}
	/* Pin down the pages */
	err = __scif_pin_pages(addr, len, &prot,
			       map_flags & (SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT),
			       &pinned_pages);
	if (err) {
		scif_destroy_incomplete_window(ep, window);
		__scif_release_mm(mm);
		goto error;
	}

	window->pinned_pages = pinned_pages;
	window->prot = pinned_pages->prot;
	window->mm = mm;

	/* Prepare the remote registration window */
	err = scif_prep_remote_window(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %ld\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	/* Tell the peer about the new window */
	err = scif_send_scif_register(ep, window);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %ld\n", __func__, __LINE__, err);
		goto error_unmap;
	}

	scif_put_peer_dev(spdev);
	/* No further failures expected. Insert new window */
	scif_insert_local_window(window, ep);
	dev_dbg(&ep->remote_dev->sdev->dev,
		"SCIFAPI register: ep %p addr %p len 0x%lx computed_offset 0x%llx\n",
		epd, addr, len, computed_offset);
	return computed_offset;
error_unmap:
	scif_destroy_window(ep, window);
error:
	scif_put_peer_dev(spdev);
	dev_err(&ep->remote_dev->sdev->dev,
		"%s %d err %ld\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_register);
int
scif_unregister(scif_epd_t epd, off_t offset, size_t len)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_window *window = NULL;
	struct scif_rma_req req;
	int nr_pages, err;
	struct device *spdev;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI unregister: ep %p offset 0x%lx len 0x%lx\n",
		ep, offset, len);
	/* len must be page aligned. len should be non zero */
	if (!len ||
	    (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
		return -EINVAL;

	/* Offset is not page aligned or offset+len wraps around */
	if ((ALIGN(offset, PAGE_SIZE) != offset) ||
	    (offset < 0) ||
	    (len > LONG_MAX - offset))
		return -EINVAL;

	err = scif_verify_epd(ep);
	if (err)
		return err;

	nr_pages = len >> PAGE_SHIFT;

	req.out_window = &window;
	req.offset = offset;
	req.prot = 0;
	req.nr_bytes = len;
	req.type = SCIF_WINDOW_FULL;
	req.head = &ep->rma_info.reg_list;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		return err;
	}
	mutex_lock(&ep->rma_info.rma_lock);
	/* Does a valid window exist? */
	err = scif_query_window(&req);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto error;
	}
	/* Unregister all the windows in this range */
	err = scif_rma_list_unregister(window, offset, nr_pages);
	if (err)
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
error:
	mutex_unlock(&ep->rma_info.rma_lock);
	scif_put_peer_dev(spdev);
	return err;
}
EXPORT_SYMBOL_GPL(scif_unregister);
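/*
 * End-to-end usage sketch for the exported registration API (illustrative
 * only; the endpoint setup and the buffer are assumptions, not taken from
 * this file):
 *
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, 1);	// 2 pages
 *	off_t off = scif_register(epd, buf, 2 * PAGE_SIZE, 0,
 *				  SCIF_PROT_READ | SCIF_PROT_WRITE,
 *				  SCIF_MAP_KERNEL);
 *	if (off >= 0) {
 *		// peer may now target [off, off + 2 * PAGE_SIZE) with RMA
 *		scif_unregister(epd, off, 2 * PAGE_SIZE);
 *	}
 *	free_pages((unsigned long)buf, 1);
 *
 * scif_register() pins the pages itself (via __scif_pin_pages()), so no
 * separate scif_pin_pages() call is needed on this path.
 */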