/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>
#include <linux/bits.h>

/* Register offsets. */
#define VMCI_STATUS_ADDR      0x00
#define VMCI_CONTROL_ADDR     0x04
#define VMCI_ICR_ADDR         0x08
#define VMCI_IMR_ADDR         0x0c
#define VMCI_DATA_OUT_ADDR    0x10
#define VMCI_DATA_IN_ADDR     0x14
#define VMCI_CAPS_ADDR        0x18
#define VMCI_RESULT_LOW_ADDR  0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20

/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON BIT(0)

/* Control register bits. */
#define VMCI_CONTROL_RESET       BIT(0)
#define VMCI_CONTROL_INT_ENABLE  BIT(1)
#define VMCI_CONTROL_INT_DISABLE BIT(2)

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL     BIT(0)
#define VMCI_CAPS_GUESTCALL     BIT(1)
#define VMCI_CAPS_DATAGRAM      BIT(2)
#define VMCI_CAPS_NOTIFICATIONS BIT(3)
#define VMCI_CAPS_PPN64         BIT(4)

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM     BIT(0)
#define VMCI_ICR_NOTIFICATION BIT(1)

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM     BIT(0)
#define VMCI_IMR_NOTIFICATION BIT(1)

/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2

/*
 * Supported interrupt vectors.  There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
        VMCI_INTR_DATAGRAM = 0,
        VMCI_INTR_NOTIFICATION = 1,
};

/*
 * A single VMCI device has an upper limit of 128MB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)

/*
 * Queues with pre-mapped data pages must be small, so that we don't pin
 * too much kernel memory (especially on vmkernel).  We limit a queuepair to
 * 32 KB, or 16 KB per queue for symmetrical pairs.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)

/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles.  If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 *
 * VMCI reserved hypervisor datagram resource IDs.
 */
enum {
        VMCI_RESOURCES_QUERY = 0,
        VMCI_GET_CONTEXT_ID = 1,
        VMCI_SET_NOTIFY_BITMAP = 2,
        VMCI_DOORBELL_LINK = 3,
        VMCI_DOORBELL_UNLINK = 4,
        VMCI_DOORBELL_NOTIFY = 5,
        /*
         * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
         * obsoleted by the removal of VM to VM communication.
         */
        VMCI_DATAGRAM_REQUEST_MAP = 6,
        VMCI_DATAGRAM_REMOVE_MAP = 7,
        VMCI_EVENT_SUBSCRIBE = 8,
        VMCI_EVENT_UNSUBSCRIBE = 9,
        VMCI_QUEUEPAIR_ALLOC = 10,
        VMCI_QUEUEPAIR_DETACH = 11,

        /*
         * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
         * WS 7.0/7.1 and ESX 4.1
         */
        VMCI_HGFS_TRANSPORT = 13,
        VMCI_UNITY_PBRPC_REGISTER = 14,
        VMCI_RPC_PRIVILEGED = 15,
        VMCI_RPC_UNPRIVILEGED = 16,
        VMCI_RESOURCE_MAX = 17,
};

/*
 * struct vmci_handle - Ownership information structure
 * @context:  The VMX context ID.
 * @resource: The resource ID (used for locating in resource hash).
 *
 * The vmci_handle structure is used to track resources used within
 * vmw_vmci.
 */
struct vmci_handle {
        u32 context;
        u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
        (struct vmci_handle){ .context = _cid, .resource = _rid }

static inline bool vmci_handle_is_equal(struct vmci_handle h1,
                                        struct vmci_handle h2)
{
        return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
        .context = VMCI_INVALID_ID,
        .resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
        return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}

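/*
 * Usage sketch (illustrative only, not part of this header): a handle is
 * just a (context, resource) pair, so a caller might build and sanity-check
 * one as follows; the resource ID 42 below is purely hypothetical.
 *
 *      struct vmci_handle h = vmci_make_handle(VMCI_HOST_CONTEXT_ID, 42);
 *
 *      if (vmci_handle_is_invalid(h))
 *              return VMCI_ERROR_INVALID_RESOURCE;
 */
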
/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID  VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
        .context = VMCI_ANON_SRC_CONTEXT_ID,
        .resource = VMCI_ANON_SRC_RESOURCE_ID
};

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT ((u32) 16)

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID 0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services.  This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID 1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID 2

#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
                                  (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0

/*
 * VMCI error codes.
 */
enum {
        VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
        VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
        VMCI_SUCCESS_LAST_DETACH = 3,
        VMCI_SUCCESS_ACCESS_GRANTED = 2,
        VMCI_SUCCESS_ENTRY_DEAD = 1,
        VMCI_SUCCESS = 0,
        VMCI_ERROR_INVALID_RESOURCE = (-1),
        VMCI_ERROR_INVALID_ARGS = (-2),
        VMCI_ERROR_NO_MEM = (-3),
        VMCI_ERROR_DATAGRAM_FAILED = (-4),
        VMCI_ERROR_MORE_DATA = (-5),
        VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
        VMCI_ERROR_NO_ACCESS = (-7),
        VMCI_ERROR_NO_HANDLE = (-8),
        VMCI_ERROR_DUPLICATE_ENTRY = (-9),
        VMCI_ERROR_DST_UNREACHABLE = (-10),
        VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
        VMCI_ERROR_INVALID_PRIV = (-12),
        VMCI_ERROR_GENERIC = (-13),
        VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
        VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
        VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
        VMCI_ERROR_NO_PROCESS = (-17),
        VMCI_ERROR_NO_DATAGRAM = (-18),
        VMCI_ERROR_NO_RESOURCES = (-19),
        VMCI_ERROR_UNAVAILABLE = (-20),
        VMCI_ERROR_NOT_FOUND = (-21),
        VMCI_ERROR_ALREADY_EXISTS = (-22),
        VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
        VMCI_ERROR_INVALID_SIZE = (-24),
        VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
        VMCI_ERROR_TIMEOUT = (-26),
        VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
        VMCI_ERROR_INCORRECT_IRQL = (-28),
        VMCI_ERROR_EVENT_UNKNOWN = (-29),
        VMCI_ERROR_OBSOLETE = (-30),
        VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
        VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
        VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
        VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
        VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
        VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
        VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
        VMCI_ERROR_MODULE_NOT_LOADED = (-38),
        VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
        VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
        VMCI_ERROR_WOULD_BLOCK = (-41),

        /* VMCI clients should return error code within this range */
        VMCI_ERROR_CLIENT_MIN = (-500),
        VMCI_ERROR_CLIENT_MAX = (-550),

        /* Internal error codes. */
        VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};

/* VMCI reserved events. */
enum {
        /* Only applicable to guest endpoints */
        VMCI_EVENT_CTX_ID_UPDATE = 0,

        /* Applicable to guest and host */
        VMCI_EVENT_CTX_REMOVED = 1,

        /* Only applicable to guest endpoints */
        VMCI_EVENT_QP_RESUMED = 2,

        /* Applicable to guest and host */
        VMCI_EVENT_QP_PEER_ATTACH = 3,

        /* Applicable to guest and host */
        VMCI_EVENT_QP_PEER_DETACH = 4,

        /*
         * Applicable to VMX and vmk.  On vmk,
         * this event has the Context payload type.
         */
        VMCI_EVENT_MEM_ACCESS_ON = 5,

        /*
         * Applicable to VMX and vmk.  Same as
         * above for the payload type.
         */
        VMCI_EVENT_MEM_ACCESS_OFF = 6,
        VMCI_EVENT_MAX = 7,
};

/*
 * Of the above events, a few are reserved for use in the VMX, and
 * other endpoints (guest and host kernel) should not use them.  For
 * the rest of the events, we allow both host and guest endpoints to
 * subscribe to them, to maintain the same API for host and guest
 * endpoints.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
                                      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
                                  !VMCI_EVENT_VALID_VMX(_event))

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0

/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint).  An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
        VMCI_NO_PRIVILEGE_FLAGS = 0,
        VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
        VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
        VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
                                    VMCI_PRIVILEGE_FLAG_TRUSTED),
        VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
        VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
        VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023

/*
 * Driver version.
 *
 * Increment major version when you make an incompatible change.
 * Compatibility goes both ways (old driver with new executable
 * as well as new driver with old executable).
 */

/* Never change VMCI_VERSION_SHIFT_WIDTH */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor) \
        ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v) ((u16) (v))

/*
 * VMCI_VERSION is always the current version.  Subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX).
 *
 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
 * communication.
 *
 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
 * support.
 *
 * VMCI_VERSION_HOSTQP: This version introduced host end point support
 * for hosted products.
 *
 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
 * support for host end-points.
 *
 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
 * represent the version of a VMX which doesn't call into the driver
 * with ioctl VERSION2 and thus doesn't establish its version with the
 * driver.
 */

#define VMCI_VERSION            VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM     VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY     VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP     VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP  VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2   VMCI_MAKE_VERSION(1, 0)
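
/*
 * Worked example (informational): VMCI_VERSION_NOVMVM above expands to
 * VMCI_MAKE_VERSION(11, 0) == (11 << 16) | 0 == 0xb0000, and the accessor
 * macros invert it: VMCI_VERSION_MAJOR(0xb0000) == 11 and
 * VMCI_VERSION_MINOR(0xb0000) == 0.
 */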

#define VMCI_SOCKETS_MAKE_VERSION(_p) \
        ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))

/*
 * The VMCI IOCTLs.  We use identity code 7, as noted in ioctl-number.h, and
 * we start at sequence 9f.  This gives us the same values that our shipping
 * products use, starting at 1951, provided we leave out the direction and
 * structure size.  Note that VMMon occupies the block following us, starting
 * at 2001.
 */
#define IOCTL_VMCI_VERSION                      _IO(7, 0x9f)    /* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT                 _IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA              _IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE              _IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE        _IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2                     _IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC              _IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE        _IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH             _IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND                _IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE             _IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION         _IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION      _IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE            _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE            _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID               _IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION              _IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE         _IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID        _IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY                   _IO(7, 0xcb)    /* 1995 */
/*IOCTL_VMMON_START                             _IO(7, 0xd1)*/  /* 2001 */
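
/*
 * For reference, _IO(7, 0x9f) encodes the identity code 7 in bits 8-15 and
 * the sequence number 0x9f in bits 0-7, i.e. 0x079f == 1951; likewise
 * _IO(7, 0xd1) works out to 0x07d1 == 2001, where the VMMon block begins.
 */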

/*
 * struct vmci_queue_header - VMCI Queue Header information.
 *
 * A Queue cannot stand by itself as designed.  Each Queue's header
 * contains a pointer into itself (the producer_tail) and into its peer
 * (consumer_head).  The reason for the separation is one of
 * accessibility: Each end-point can modify two things: where the next
 * location to enqueue is within its produce_q (producer_tail); and
 * where the next dequeue location is in its consume_q (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to
 * guest; NOTE that in the host both queue headers are mapped r/w).
 * But, each end-point needs read access to both Queue header
 * structures in order to determine how much space is used (or left)
 * in the Queue.  This is because for an end-point to know how full
 * its produce_q is, it needs to use the consumer_head that points into
 * the produce_q but -that- consumer_head is in the Queue header for
 * that end-point's consume_q.
 *
 * Thoroughly confused?  Sorry.
 *
 * producer_tail: the point to enqueue new entrants.  When you approach
 * a line in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued.  In other words, whoever is at the head of the line is
 * served next.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head in which case consumer_head does not point to a valid
 * byte of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head
 * then the produce_q is empty.
 */
struct vmci_queue_header {
        /* All fields are 64bit and aligned. */
        struct vmci_handle handle;      /* Identifier. */
        atomic64_t producer_tail;       /* Offset in this queue. */
        atomic64_t consumer_head;       /* Offset in peer queue. */
};
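
/*
 * Concretely (an illustrative restatement of the above, not additional API):
 * an endpoint writes only to its own produce_q header.  It advances
 * producer_tail there when it enqueues into its produce_q, and advances
 * consumer_head there when it dequeues from its consume_q.  To size the free
 * space of its produce_q it therefore pairs fields from both headers:
 *
 *      tail = produce_q_header->producer_tail;  (advanced locally)
 *      head = consume_q_header->consumer_head;  (advanced by the peer)
 *      free = queue_size - ((tail - head) mod queue_size) - 1
 */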

/*
 * struct vmci_datagram - Base struct for vmci datagrams.
 * @dst:          A vmci_handle that tracks the destination of the datagram.
 * @src:          A vmci_handle that tracks the source of the datagram.
 * @payload_size: The size of the payload.
 *
 * vmci_datagram structs are used when sending vmci datagrams.  They include
 * the necessary source and destination information to properly route
 * the information along with the size of the package.
 */
struct vmci_datagram {
        struct vmci_handle dst;
        struct vmci_handle src;
        u64 payload_size;
};

/*
 * Second flag is for creating a well-known handle instead of a per context
 * handle.  Next flag is for deferring datagram delivery, so that the
 * datagram callback is invoked in a delayed context (not interrupt context).
 */
#define VMCI_FLAG_DG_NONE          0
#define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
#define VMCI_FLAG_ANYCID_DG_HND    BIT(1)
#define VMCI_FLAG_DG_DELAYED_CB    BIT(2)

/*
 * Maximum supported size of a VMCI datagram for routable datagrams.
 * Datagrams going to the hypervisor are allowed to be larger.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
                                  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
                                      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
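
/*
 * Size example (informational): struct vmci_datagram is 24 bytes (two 8-byte
 * handles plus a u64), so a datagram carrying a 100-byte payload has
 * VMCI_DG_SIZE() == 124, which rounds up to VMCI_DG_SIZE_ALIGNED() == 128.
 */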

struct vmci_event_payload_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};

/* Flags for VMCI queue_pair API. */
enum {
        /* Fail alloc if QP not created by peer. */
        VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

        /* Only allow attaches from local context. */
        VMCI_QPFLAG_LOCAL = 1 << 1,

        /* Host won't block when guest is quiesced. */
        VMCI_QPFLAG_NONBLOCK = 1 << 2,

        /* Pin data pages in ESX.  Used with NONBLOCK */
        VMCI_QPFLAG_PINNED = 1 << 3,

        /* Update the following flag when adding new flags. */
        VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
                             VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

        /* Convenience flags */
        VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
        VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};

/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context.  We define this
 * limit on event datagrams from the hypervisor to guard against DoS attack
 * from a malicious VM which could repeatedly attach to and detach from a
 * queue pair, causing events to be queued at the destination VM.  However,
 * the rate at which such events can be generated is small since it requires
 * a VM exit and handling of queue pair attach/detach call at the hypervisor.
 * Event datagrams may be queued up at the destination VM if it has interrupts
 * disabled or if it is not draining events for some other reason.  1024
 * datagrams is a grossly conservative estimate of the time for which
 * interrupts may be disabled in the destination VM, but at the same time does
 * not exacerbate the memory pressure problem on the host by much (size of
 * each event datagram is small).
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
        (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
         1024 * (sizeof(struct vmci_datagram) + \
                 sizeof(struct vmci_event_data_max)))

/*
 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
 * hypervisor resources.  Struct size is 16 bytes.  All fields in struct are
 * aligned to their natural alignment.
 */
struct vmci_resource_query_hdr {
        struct vmci_datagram hdr;
        u32 num_resources;
        u32 _padding;
};

/*
 * Convenience struct for negotiating vectors.  Must match layout of
 * VMCIResourceQueryHdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
        u32 num_resources;
        u32 _padding;
        u32 resources[1];
};

/*
 * The maximum number of resources that can be queried using
 * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
 * bits of a positive return value.  Negative values are reserved for
 * errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE \
        (sizeof(struct vmci_resource_query_hdr) + \
         sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)

/*
 * Struct used for setting the notification bitmap.  All fields in
 * struct are aligned to their natural alignment.
 */
struct vmci_notify_bm_set_msg {
        struct vmci_datagram hdr;
        union {
                u32 bitmap_ppn32;
                u64 bitmap_ppn64;
        };
};

/*
 * Struct used for linking a doorbell handle with an index in the
 * notify bitmap.  All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_link_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u64 notify_idx;
};

/*
 * Struct used for unlinking a doorbell handle from an index in the
 * notify bitmap.  All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_unlink_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/*
 * Struct used for generating a notification on a doorbell handle.  All
 * fields in struct are aligned to their natural alignment.
 */
struct vmci_doorbell_notify_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/*
 * This struct is used to contain data for events.  Size of this struct is a
 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
 */
struct vmci_event_data {
        u32 event;      /* 4 bytes. */
        u32 _pad;
        /* Event payload is put here. */
};

/*
 * Define the different VMCI_EVENT payload data types here.  All structs must
 * be a multiple of 8 bytes, and fields must be aligned to their natural
 * alignment.
 */
struct vmci_event_payld_ctx {
        u32 context_id; /* 4 bytes. */
        u32 _pad;
};

struct vmci_event_payld_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};

/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest.  If adding a new event
 * payload type above, add it to the following struct too (inside the
 * union).
 */
struct vmci_event_data_max {
        struct vmci_event_data event_data;
        union {
                struct vmci_event_payld_ctx context_payload;
                struct vmci_event_payld_qp qp_payload;
        } ev_data_payload;
};

/*
 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
 * VMCI_EVENT_HANDLER messages.  Struct size is 32 bytes.  All fields
 * in struct are aligned to their natural alignment.
 */
struct vmci_event_msg {
        struct vmci_datagram hdr;

        /* Has event type and payload. */
        struct vmci_event_data event_data;

        /* Payload gets put here. */
};

/* Event with context payload. */
struct vmci_event_ctx {
        struct vmci_event_msg msg;
        struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
        struct vmci_event_msg msg;
        struct vmci_event_payld_qp payload;
};

/*
 * Structs used for queue_pair alloc and detach messages.  We align fields of
 * these structs to 64bit boundaries.
 */
struct vmci_qp_alloc_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u64 num_ppns;

        /* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB BIT(0)

typedef void (*vmci_callback) (void *client_data);

/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by
 * VMCI.  It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
                                      struct vmci_datagram *msg);

/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
                               void *client_data);

/*
 * We use the following inline function to access the payload data
 * associated with an event data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
        return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
        return (void *)vmci_event_data_const_payload(ev_data);
}
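
/*
 * For instance (illustrative only), given a struct vmci_event_qp *e received
 * as an event datagram, the attach/detach payload can be reached either
 * directly as &e->payload or generically as
 * vmci_event_data_payload(&e->msg.event_data); both name the same bytes,
 * since the payload immediately follows the vmci_event_data header.
 */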

/*
 * Helper to read a value from a head or tail pointer.  For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case.  Also, doing an
 * atomic64_read on X86_32 uniprocessor systems may be implemented
 * as a non-locked cmpxchg8b, which may end up overwriting updates done
 * by the VMCI device to the memory location.  On 32bit SMP, the lock
 * prefix will be used, so correctness isn't an issue, but using a
 * 64bit operation still adds unnecessary overhead.
 */
static inline u64 vmci_q_read_pointer(atomic64_t *var)
{
#if defined(CONFIG_X86_32)
        return atomic_read((atomic_t *)var);
#else
        return atomic64_read(var);
#endif
}

/*
 * Helper to set the value of a head or tail pointer.  For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case.  On 32bit SMP, using a
 * locked cmpxchg8b adds unnecessary overhead.
 */
static inline void vmci_q_set_pointer(atomic64_t *var,
                                      u64 new_val)
{
#if defined(CONFIG_X86_32)
        return atomic_set((atomic_t *)var, (u32)new_val);
#else
        return atomic64_set(var, new_val);
#endif
}

/*
 * Helper to add a given offset to a head or tail pointer.  Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(atomic64_t *var,
                                       size_t add,
                                       u64 size)
{
        u64 new_val = vmci_q_read_pointer(var);

        if (new_val >= size - add)
                new_val -= size;

        new_val += add;

        vmci_q_set_pointer(var, new_val);
}
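
/*
 * Worked example (informational): with a 1024-byte queue, adding 100 to a
 * pointer currently at 1000 yields 76.  The transient unsigned underflow of
 * the "new_val -= size" step is cancelled by the subsequent "new_val += add".
 */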

/*
 * Helper routine to get the Producer Tail from the supplied queue.
 */
static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return vmci_q_read_pointer(&qh->producer_tail);
}

/*
 * Helper routine to get the Consumer Head from the supplied queue.
 */
static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return vmci_q_read_pointer(&qh->consumer_head);
}

/*
 * Helper routine to increment the Producer Tail.  Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the tail itself.
 */
static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 * Helper routine to increment the Consumer Head.  Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the head itself.
 */
static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 * Helper routine for getting the head and the tail pointer for a queue.
 * Both the VMCIQueues are needed to get both the pointers for one queue.
 */
static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
                           const struct vmci_queue_header *consume_q_header,
                           u64 *producer_tail,
                           u64 *consumer_head)
{
        if (producer_tail)
                *producer_tail = vmci_q_header_producer_tail(produce_q_header);

        if (consumer_head)
                *consumer_head = vmci_q_header_consumer_head(consume_q_header);
}

static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
                                      const struct vmci_handle handle)
{
        q_header->handle = handle;
        atomic64_set(&q_header->producer_tail, 0);
        atomic64_set(&q_header->consumer_head, 0);
}

/*
 * Finds available free space in a produce queue to enqueue more
 * data or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
                         const struct vmci_queue_header *consume_q_header,
                         const u64 produce_q_size)
{
        u64 tail;
        u64 head;
        u64 free_space;

        tail = vmci_q_header_producer_tail(produce_q_header);
        head = vmci_q_header_consumer_head(consume_q_header);

        if (tail >= produce_q_size || head >= produce_q_size)
                return VMCI_ERROR_INVALID_SIZE;

        /*
         * Deduct 1 to avoid tail becoming equal to head which causes
         * ambiguity.  If head and tail are equal it means that the
         * queue is empty.
         */
        if (tail >= head)
                free_space = produce_q_size - (tail - head) - 1;
        else
                free_space = head - tail - 1;

        return free_space;
}

/*
 * vmci_q_header_free_space() does all the heavy lifting of
 * determining the number of free bytes in a Queue.  This routine
 * then subtracts that size from the full size of the Queue so
 * the caller knows how many bytes are ready to be dequeued.
 * Results:
 * On success, available data size in bytes (up to MAX_INT64).
 * On failure, appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
                        const struct vmci_queue_header *produce_q_header,
                        const u64 consume_q_size)
{
        s64 free_space;

        free_space = vmci_q_header_free_space(consume_q_header,
                                              produce_q_header, consume_q_size);
        if (free_space < VMCI_SUCCESS)
                return free_space;

        return consume_q_size - free_space - 1;
}
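
/*
 * Worked example (informational): for a 16-byte queue with
 * producer_tail == 10 and consumer_head == 4, vmci_q_header_free_space()
 * returns 16 - (10 - 4) - 1 == 9 free bytes, and the consuming side's
 * vmci_q_header_buf_ready() reports 16 - 9 - 1 == 6 bytes ready, exactly
 * the span between head and tail.
 */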

#endif /* _VMW_VMCI_DEF_H_ */