/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "vboxvideo_guest.h"
#include "vbox_err.h"
#include "hgsmi_channels.h"

/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there, serialized by vbva_buffer_begin_update()
 * and vbva_buffer_end_update().
 *
 * free_offset is the writing position and data_offset is the reading
 * position; free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset when data
 * are in the buffer, so a full buffer is never mistaken for an empty one.
 * The guest only changes free_offset, the host only changes data_offset.
 * A worked example of this arithmetic follows below.
 */
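
/*
 * Worked example with made-up numbers (an illustration, not code from this
 * file): if data_len == 4096, data_offset == 1024 and free_offset == 3072,
 * then vbva_buffer_available() below returns 4096 + (1024 - 3072) == 2048,
 * i.e. the guest may still write 2048 bytes before its write position
 * catches up with the host's read position.
 */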

/* Forward declarations of internal functions. */
static void vbva_buffer_flush(struct gen_pool *ctx);
static void vbva_buffer_place_data_at(struct vbva_buf_context *ctx,
				      const void *p, u32 len, u32 offset);
static bool vbva_write(struct vbva_buf_context *ctx,
		       struct gen_pool *pHGSMICtx,
		       const void *p, u32 len);

static bool vbva_inform_host(struct vbva_buf_context *ctx,
			     struct gen_pool *pHGSMICtx,
			     s32 screen, bool enable)
{
	bool ret = false;

#if 0 /* All callers check this */
	if (ppdev->bHGSMISupported)
#endif
	{
		void *p = hgsmi_buffer_alloc(pHGSMICtx,
					     sizeof(struct vbva_enable_ex),
					     HGSMI_CH_VBVA, VBVA_ENABLE);
		if (!p) {
			// LogFunc(("HGSMIHeapAlloc failed\n"));
		} else {
			struct vbva_enable_ex *pEnable = p;

			pEnable->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
			pEnable->base.offset = ctx->buffer_offset;
			pEnable->base.result = VERR_NOT_SUPPORTED;
			if (screen >= 0) {
				pEnable->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
				pEnable->screen_id = screen;
			}

			hgsmi_buffer_submit(pHGSMICtx, p);

			if (enable) {
				ret = RT_SUCCESS(pEnable->base.result);
			} else {
				ret = true;
			}

			hgsmi_buffer_free(pHGSMICtx, p);
		}
	}

	return ret;
}

/*
 * Public hardware buffer methods.
 */
bool vbva_enable(struct vbva_buf_context *ctx,
		 struct gen_pool *pHGSMICtx,
		 VBVABUFFER *vbva, s32 screen)
{
	bool ret = false;

	// LogFlowFunc(("vbva %p\n", vbva));

#if 0 /* All callers check this */
	if (ppdev->bHGSMISupported)
#endif
	{
		// LogFunc(("vbva %p vbva off 0x%x\n", vbva, ctx->buffer_offset));

		vbva->host_flags.host_events = 0;
		vbva->host_flags.supported_orders = 0;
		vbva->data_offset = 0;
		vbva->free_offset = 0;
		memset(vbva->records, 0, sizeof(vbva->records));
		vbva->first_record_index = 0;
		vbva->free_record_index = 0;
		vbva->partial_write_tresh = 256;
		vbva->data_len = ctx->buffer_length - sizeof(VBVABUFFER) + sizeof(vbva->data);

		ctx->buffer_overflow = false;
		ctx->record = NULL;
		ctx->vbva = vbva;

		ret = vbva_inform_host(ctx, pHGSMICtx, screen, true);
	}

	if (!ret) {
		vbva_disable(ctx, pHGSMICtx, screen);
	}

	return ret;
}

void vbva_disable(struct vbva_buf_context *ctx,
		  struct gen_pool *pHGSMICtx,
		  s32 screen)
{
	// LogFlowFunc(("\n"));

	ctx->buffer_overflow = false;
	ctx->record = NULL;
	ctx->vbva = NULL;

	vbva_inform_host(ctx, pHGSMICtx, screen, false);

	return;
}

bool vbva_buffer_begin_update(struct vbva_buf_context *ctx,
			      struct gen_pool *pHGSMICtx)
{
	bool ret = false;

	// LogFunc(("flags = 0x%08X\n", ctx->vbva ? ctx->vbva->host_events : -1));

	if (ctx->vbva &&
	    (ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED)) {
		u32 next;

		WARN_ON_ONCE(ctx->buffer_overflow);
		WARN_ON_ONCE(ctx->record != NULL);

		next = (ctx->vbva->free_record_index + 1) % VBVA_MAX_RECORDS;

		if (next == ctx->vbva->first_record_index) {
			/* All slots in the records queue are used. */
			vbva_buffer_flush(pHGSMICtx);
		}

		if (next == ctx->vbva->first_record_index) {
			/* Even after the flush there is no space. Fail the request. */
			// LogFunc(("no space in the queue of records!!! first %d, last %d\n",
			//          ctx->vbva->first_record_index, ctx->vbva->free_record_index));
		} else {
			/* Initialize the record. */
			VBVARECORD *record = &ctx->vbva->records[ctx->vbva->free_record_index];

			record->len_and_flags = VBVA_F_RECORD_PARTIAL;

			ctx->vbva->free_record_index = next;

			// LogFunc(("next = %d\n", next));

			/* Remember which record we are using. */
			ctx->record = record;

			ret = true;
		}
	}

	return ret;
}

void vbva_buffer_end_update(struct vbva_buf_context *ctx)
{
	VBVARECORD *record;

	// LogFunc(("\n"));

	WARN_ON_ONCE(!ctx->vbva);

	record = ctx->record;
	WARN_ON_ONCE(!(record && (record->len_and_flags & VBVA_F_RECORD_PARTIAL)));

	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

	ctx->buffer_overflow = false;
	ctx->record = NULL;

	return;
}

/*
 * Private operations.
 */
static u32 vbva_buffer_available(const VBVABUFFER *vbva)
{
	s32 diff = vbva->data_offset - vbva->free_offset;

	return diff > 0 ? diff : vbva->data_len + diff;
}

static void vbva_buffer_flush(struct gen_pool *ctx)
{
	/* Issue the flush command. */
	void *p = hgsmi_buffer_alloc(ctx, sizeof(VBVAFLUSH),
				     HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!p) {
		// LogFunc(("HGSMIHeapAlloc failed\n"));
	} else {
		VBVAFLUSH *pFlush = (VBVAFLUSH *)p;

		pFlush->reserved = 0;

		hgsmi_buffer_submit(ctx, p);

		hgsmi_buffer_free(ctx, p);
	}

	return;
}

static void vbva_buffer_place_data_at(struct vbva_buf_context *ctx,
				      const void *p, u32 len, u32 offset)
{
	VBVABUFFER *vbva = ctx->vbva;
	u32 bytes_till_boundary = vbva->data_len - offset;
	u8 *dst = &vbva->data[offset];
	s32 diff = len - bytes_till_boundary;

	if (diff <= 0) {
		/* Chunk will not cross the buffer boundary. */
		memcpy(dst, p, len);
	} else {
		/* Chunk crosses the buffer boundary. */
		memcpy(dst, p, bytes_till_boundary);
		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
	}

	return;
}

static bool vbva_write(struct vbva_buf_context *ctx,
		       struct gen_pool *pHGSMICtx,
		       const void *p, u32 len)
{
	VBVARECORD *record;
	u32 available;

	u32 cbWritten = 0;

	VBVABUFFER *vbva = ctx->vbva;
	WARN_ON_ONCE(!vbva);

	if (!vbva || ctx->buffer_overflow) {
		return false;
	}

	WARN_ON_ONCE(vbva->first_record_index == vbva->free_record_index);

	record = ctx->record;
	WARN_ON_ONCE(!(record && (record->len_and_flags & VBVA_F_RECORD_PARTIAL)));

	// LogFunc(("%d\n", len));

	available = vbva_buffer_available(vbva);

	while (len > 0) {
		u32 chunk = len;

		// LogFunc(("vbva->free_offset %d, record->len_and_flags 0x%08X, available %d, len %d, cbWritten %d\n",
		//          vbva->free_offset, record->len_and_flags, available, len, cbWritten));

		if (chunk >= available) {
			// LogFunc(("1) avail %d, chunk %d\n", available, chunk));

			vbva_buffer_flush(pHGSMICtx);

			available = vbva_buffer_available(vbva);

			if (chunk >= available) {
				// LogFunc(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
				//          len, available));

				if (available <= vbva->partial_write_tresh) {
					// LogFunc(("Buffer overflow!!!\n"));
					ctx->buffer_overflow = true;
					WARN_ON_ONCE(true);
					return false;
				}

				chunk = available - vbva->partial_write_tresh;
			}
		}

		WARN_ON_ONCE(chunk > len);
		WARN_ON_ONCE(chunk > vbva_buffer_available(vbva));

		vbva_buffer_place_data_at(ctx, (u8 *)p + cbWritten, chunk,
					  vbva->free_offset);

		vbva->free_offset = (vbva->free_offset + chunk) % vbva->data_len;
		record->len_and_flags += chunk;
		available -= chunk;

		len -= chunk;
		cbWritten += chunk;
	}

	return true;
}

/*
 * Public writer to the hardware buffer.
 */
bool VBoxVBVAWrite(struct vbva_buf_context *ctx,
		   struct gen_pool *pHGSMICtx,
		   const void *pv, u32 len)
{
	return vbva_write(ctx, pHGSMICtx, pv, len);
}

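/*
 * Minimal caller-side sketch (assumed names such as hgsmi_ctx, cmd and
 * cmd_len are illustrative, not code from this file): a command is normally
 * framed by the begin/end update pair so the host sees one complete record.
 *
 *	if (vbva_buffer_begin_update(ctx, hgsmi_ctx)) {
 *		VBoxVBVAWrite(ctx, hgsmi_ctx, cmd, cmd_len);
 *		vbva_buffer_end_update(ctx);
 *	}
 */
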
bool VBoxVBVAOrderSupported(struct vbva_buf_context *ctx, unsigned code)
{
	VBVABUFFER *vbva = ctx->vbva;

	if (!vbva) {
		return false;
	}

	if (vbva->host_flags.supported_orders & (1 << code)) {
		return true;
	}

	return false;
}

void VBoxVBVASetupBufferContext(struct vbva_buf_context *ctx,
				u32 buffer_offset,
				u32 buffer_length)
{
	ctx->buffer_offset = buffer_offset;
	ctx->buffer_length = buffer_length;
}