/* vbva_base.c - VBVA ring-buffer helpers for the VirtualBox video driver. */
/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "vboxvideo_guest.h"
25 #include "hgsmi_channels.h"
/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there serialized by vbva_buffer_begin_update
 * and vbva_buffer_end_update.
 *
 * free_offset is the writing position. data_offset is the reading position.
 * free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset when data
 * are in the buffer.
 * The guest only changes free_offset; the host changes data_offset.
 */
40 /* Forward declarations of internal functions. */
41 static void vbva_buffer_flush(struct gen_pool
* ctx
);
42 static void vbva_buffer_place_data_at(struct vbva_buf_context
* ctx
, const void *p
,
44 static bool vbva_write(struct vbva_buf_context
* ctx
,
45 struct gen_pool
* pHGSMICtx
,
46 const void *p
, u32 len
);
49 static bool vbva_inform_host(struct vbva_buf_context
* ctx
,
50 struct gen_pool
* pHGSMICtx
,
51 s32 screen
, bool enable
)
55 #if 0 /* All callers check this */
56 if (ppdev
->bHGSMISupported
)
59 void *p
= hgsmi_buffer_alloc(pHGSMICtx
,
60 sizeof (struct vbva_enable_ex
),
64 // LogFunc(("HGSMIHeapAlloc failed\n"));
66 struct vbva_enable_ex
*pEnable
= p
;
68 pEnable
->base
.flags
= enable
? VBVA_F_ENABLE
: VBVA_F_DISABLE
;
69 pEnable
->base
.offset
= ctx
->buffer_offset
;
70 pEnable
->base
.result
= VERR_NOT_SUPPORTED
;
72 pEnable
->base
.flags
|= VBVA_F_EXTENDED
| VBVA_F_ABSOFFSET
;
73 pEnable
->screen_id
= screen
;
76 hgsmi_buffer_submit(pHGSMICtx
, p
);
79 ret
= RT_SUCCESS(pEnable
->base
.result
);
84 hgsmi_buffer_free(pHGSMICtx
, p
);
/*
 * Public hardware buffer methods.
 */
94 bool vbva_enable(struct vbva_buf_context
* ctx
,
95 struct gen_pool
* pHGSMICtx
,
96 VBVABUFFER
*vbva
, s32 screen
)
100 // LogFlowFunc(("vbva %p\n", vbva));
102 #if 0 /* All callers check this */
103 if (ppdev
->bHGSMISupported
)
106 // LogFunc(("vbva %p vbva off 0x%x\n", vbva, ctx->buffer_offset));
108 vbva
->host_flags
.host_events
= 0;
109 vbva
->host_flags
.supported_orders
= 0;
110 vbva
->data_offset
= 0;
111 vbva
->free_offset
= 0;
112 memset(vbva
->records
, 0, sizeof (vbva
->records
));
113 vbva
->first_record_index
= 0;
114 vbva
->free_record_index
= 0;
115 vbva
->partial_write_tresh
= 256;
116 vbva
->data_len
= ctx
->buffer_length
- sizeof (VBVABUFFER
) + sizeof (vbva
->data
);
118 ctx
->buffer_overflow
= false;
122 ret
= vbva_inform_host(ctx
, pHGSMICtx
, screen
, true);
126 vbva_disable(ctx
, pHGSMICtx
, screen
);
132 void vbva_disable(struct vbva_buf_context
* ctx
,
133 struct gen_pool
* pHGSMICtx
,
136 // LogFlowFunc(("\n"));
138 ctx
->buffer_overflow
= false;
142 vbva_inform_host(ctx
, pHGSMICtx
, screen
, false);
147 bool vbva_buffer_begin_update(struct vbva_buf_context
* ctx
,
148 struct gen_pool
* pHGSMICtx
)
152 // LogFunc(("flags = 0x%08X\n", ctx->vbva? ctx->vbva->host_events: -1));
155 && (ctx
->vbva
->host_flags
.host_events
& VBVA_F_MODE_ENABLED
)) {
158 WARN_ON_ONCE(!((!ctx
->buffer_overflow
)));
159 WARN_ON_ONCE(!((ctx
->record
== NULL
)));
161 next
= (ctx
->vbva
->free_record_index
+ 1) % VBVA_MAX_RECORDS
;
163 if (next
== ctx
->vbva
->first_record_index
) {
164 /* All slots in the records queue are used. */
165 vbva_buffer_flush (pHGSMICtx
);
168 if (next
== ctx
->vbva
->first_record_index
) {
169 /* Even after flush there is no place. Fail the request. */
170 // LogFunc(("no space in the queue of records!!! first %d, last %d\n",
171 // ctx->vbva->first_record_index, ctx->vbva->free_record_index));
173 /* Initialize the record. */
174 VBVARECORD
*record
= &ctx
->vbva
->records
[ctx
->vbva
->free_record_index
];
176 record
->len_and_flags
= VBVA_F_RECORD_PARTIAL
;
178 ctx
->vbva
->free_record_index
= next
;
180 // LogFunc(("next = %d\n", next));
182 /* Remember which record we are using. */
183 ctx
->record
= record
;
192 void vbva_buffer_end_update(struct vbva_buf_context
* ctx
)
198 WARN_ON_ONCE(!((ctx
->vbva
)));
200 record
= ctx
->record
;
201 WARN_ON_ONCE(!((record
&& (record
->len_and_flags
& VBVA_F_RECORD_PARTIAL
))));
203 /* Mark the record completed. */
204 record
->len_and_flags
&= ~VBVA_F_RECORD_PARTIAL
;
206 ctx
->buffer_overflow
= false;
/*
 * Private operations.
 */
215 static u32
vbva_buffer_available (const VBVABUFFER
*vbva
)
217 s32 diff
= vbva
->data_offset
- vbva
->free_offset
;
219 return diff
> 0? diff
: vbva
->data_len
+ diff
;
222 static void vbva_buffer_flush(struct gen_pool
* ctx
)
224 /* Issue the flush command. */
225 void *p
= hgsmi_buffer_alloc(ctx
,
230 // LogFunc(("HGSMIHeapAlloc failed\n"));
232 VBVAFLUSH
*pFlush
= (VBVAFLUSH
*)p
;
234 pFlush
->reserved
= 0;
236 hgsmi_buffer_submit(ctx
, p
);
238 hgsmi_buffer_free(ctx
, p
);
244 static void vbva_buffer_place_data_at(struct vbva_buf_context
* ctx
, const void *p
,
247 VBVABUFFER
*vbva
= ctx
->vbva
;
248 u32 bytes_till_boundary
= vbva
->data_len
- offset
;
249 u8
*dst
= &vbva
->data
[offset
];
250 s32 diff
= len
- bytes_till_boundary
;
253 /* Chunk will not cross buffer boundary. */
254 memcpy (dst
, p
, len
);
256 /* Chunk crosses buffer boundary. */
257 memcpy (dst
, p
, bytes_till_boundary
);
258 memcpy (&vbva
->data
[0], (u8
*)p
+ bytes_till_boundary
, diff
);
264 static bool vbva_write(struct vbva_buf_context
* ctx
,
265 struct gen_pool
* pHGSMICtx
,
266 const void *p
, u32 len
)
273 VBVABUFFER
*vbva
= ctx
->vbva
;
274 WARN_ON_ONCE(!((vbva
)));
276 if (!vbva
|| ctx
->buffer_overflow
) {
280 WARN_ON_ONCE(!((vbva
->first_record_index
!= vbva
->free_record_index
)));
282 record
= ctx
->record
;
283 WARN_ON_ONCE(!((record
&& (record
->len_and_flags
& VBVA_F_RECORD_PARTIAL
))));
285 // LogFunc(("%d\n", len));
287 available
= vbva_buffer_available (vbva
);
292 // LogFunc(("vbva->free_offset %d, record->len_and_flags 0x%08X, available %d, len %d, cbWritten %d\n",
293 // vbva->free_offset, record->len_and_flags, available, len, cbWritten));
295 if (chunk
>= available
) {
296 // LogFunc(("1) avail %d, chunk %d\n", available, chunk));
298 vbva_buffer_flush (pHGSMICtx
);
300 available
= vbva_buffer_available (vbva
);
302 if (chunk
>= available
) {
303 // LogFunc(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
306 if (available
<= vbva
->partial_write_tresh
) {
307 // LogFunc(("Buffer overflow!!!\n"));
308 ctx
->buffer_overflow
= true;
309 WARN_ON_ONCE(!((false)));
313 chunk
= available
- vbva
->partial_write_tresh
;
317 WARN_ON_ONCE(!((chunk
<= len
)));
318 WARN_ON_ONCE(!((chunk
<= vbva_buffer_available (vbva
))));
320 vbva_buffer_place_data_at (ctx
, (u8
*)p
+ cbWritten
, chunk
, vbva
->free_offset
);
322 vbva
->free_offset
= (vbva
->free_offset
+ chunk
) % vbva
->data_len
;
323 record
->len_and_flags
+= chunk
;
/*
 * Public writer to the hardware buffer.
 */
336 bool VBoxVBVAWrite(struct vbva_buf_context
* ctx
,
337 struct gen_pool
* pHGSMICtx
,
338 const void *pv
, u32 len
)
340 return vbva_write (ctx
, pHGSMICtx
, pv
, len
);
343 bool VBoxVBVAOrderSupported(struct vbva_buf_context
* ctx
, unsigned code
)
345 VBVABUFFER
*vbva
= ctx
->vbva
;
351 if (vbva
->host_flags
.supported_orders
& (1 << code
)) {
358 void VBoxVBVASetupBufferContext(struct vbva_buf_context
* ctx
,
362 ctx
->buffer_offset
= buffer_offset
;
363 ctx
->buffer_length
= buffer_length
;