1 | /* |
2 | * Copyright (c) 2014 Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #ifndef IB_UMEM_ODP_H | |
34 | #define IB_UMEM_ODP_H | |
35 | ||
36 | #include <rdma/ib_umem.h> | |
882214e2 HE |
37 | #include <rdma/ib_verbs.h> |
38 | #include <linux/interval_tree.h> | |
39 | ||
/*
 * Interval-tree node embedded in each ODP umem. __subtree_last is the
 * augmented value maintained by the <linux/interval_tree.h> machinery;
 * rb links the node into the tracking tree (see "Tree tracking" below).
 */
struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};
8ada2c1c SR |
44 | |
/* Per-umem state for on-demand paging (ODP) memory registration. */
struct ib_umem_odp {
	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page **page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	/* When false, use the notifier counter in the ucontext struct. */
	bool mn_counters_active;
	/*
	 * Sequence/count pair consulted by ib_umem_mmu_notifier_retry() to
	 * detect mmu-notifier invalidations racing with a page fault.
	 */
	int notifiers_seq;
	int notifiers_count;

	/* A linked list of umems that don't have private mmu notifier
	 * counters yet. */
	struct list_head no_private_counters;
	struct ib_umem *umem; /* back-pointer to the owning umem */

	/* Tree tracking */
	struct umem_odp_node interval_tree;

	/*
	 * NOTE(review): dying/notifier_completion/work appear to coordinate
	 * teardown with in-flight notifier callbacks — confirm against the
	 * implementation in the .c file.
	 */
	struct completion notifier_completion;
	int dying;
	struct work_struct work;
};
84 | ||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

/*
 * Attach ODP bookkeeping to @umem. @access carries the IB access flags.
 * Returns 0 on success or a negative errno (the !ODP stub returns -EINVAL).
 */
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
		    int access);

/*
 * Allocate an ODP umem covering [addr, addr + size) in @context.
 * Returns a valid umem pointer or ERR_PTR() on failure.
 */
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
				  unsigned long addr,
				  size_t size);

/* Tear down the ODP state attached to @umem. */
void ib_umem_odp_release(struct ib_umem *umem);
94 | ||
/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 *
 * The constants are built as 1ULL << n (not 1 << nULL, which would put
 * the suffix on the shift count and leave the constants as signed int)
 * so the mask is a proper unsigned 64-bit value matching dma_addr_t
 * without relying on sign extension of ~3.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
107 | ||
/*
 * Populate [start_offset, start_offset + bcnt) of the umem with pages and
 * map them for the device. @access_mask holds the requested permission
 * bits (ODP_READ_ALLOWED_BIT / ODP_WRITE_ALLOWED_BIT). @current_seq is the
 * notifier sequence sampled before faulting; presumably rechecked against
 * notifiers_seq to detect a racing invalidation — see
 * ib_umem_mmu_notifier_retry().
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
			      u64 access_mask, unsigned long current_seq);

/* Unmap and release the pages in [start_offset, bound) of the umem. */
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
				 u64 bound);

/* Insert/remove an ODP umem node into/from an interval tree. */
void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);
void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);

/* Per-umem callback type for rbt_ib_umem_for_each_in_range(). */
typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
			      void *cookie);

/*
 * Call the callback on each ib_umem in the range. Returns the logical or of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
				  umem_call_back cb, void *cookie);

/*
 * Find first region intersecting with address range.
 * Return NULL if not found
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root,
				       u64 addr, u64 length);
882214e2 HE |
131 | |
132 | static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item, | |
133 | unsigned long mmu_seq) | |
134 | { | |
135 | /* | |
136 | * This code is strongly based on the KVM code from | |
137 | * mmu_notifier_retry. Should be called with | |
138 | * the relevant locks taken (item->odp_data->umem_mutex | |
139 | * and the ucontext umem_mutex semaphore locked for read). | |
140 | */ | |
141 | ||
142 | /* Do not allow page faults while the new ib_umem hasn't seen a state | |
143 | * with zero notifiers yet, and doesn't have its own valid set of | |
144 | * private counters. */ | |
145 | if (!item->odp_data->mn_counters_active) | |
146 | return 1; | |
147 | ||
148 | if (unlikely(item->odp_data->notifiers_count)) | |
149 | return 1; | |
150 | if (item->odp_data->notifiers_seq != mmu_seq) | |
151 | return 1; | |
152 | return 0; | |
153 | } | |
154 | ||
8ada2c1c SR |
155 | #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ |
156 | ||
/* ODP support is compiled out: report any registration attempt as invalid. */
static inline int ib_umem_odp_get(struct ib_ucontext *context,
				  struct ib_umem *umem,
				  int access)
{
	return -EINVAL;
}
163 | ||
d07d1d70 AK |
164 | static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context, |
165 | unsigned long addr, | |
166 | size_t size) | |
167 | { | |
168 | return ERR_PTR(-EINVAL); | |
169 | } | |
170 | ||
8ada2c1c SR |
/* ODP support is compiled out: there is no per-umem ODP state to release. */
static inline void ib_umem_odp_release(struct ib_umem *umem)
{
}
172 | ||
173 | #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ | |
174 | ||
175 | #endif /* IB_UMEM_ODP_H */ |