/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_CS_MAX_PRIORITY		32u
#define AMDGPU_CS_NUM_BUCKETS	(AMDGPU_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct amdgpu_cs_buckets {
	struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
};

static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
}

static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
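
/* A typical use of the three helpers above, sketched after what
 * amdgpu_cs_parser_relocs() further down does (not an additional code path):
 *
 *	struct amdgpu_cs_buckets buckets;
 *
 *	amdgpu_cs_buckets_init(&buckets);
 *	for (i = 0; i < bo_list->num_entries; i++)
 *		amdgpu_cs_buckets_add(&buckets, &bo_list->array[i].tv.head,
 *				      bo_list->array[i].priority);
 *	amdgpu_cs_buckets_get_list(&buckets, &validated);
 */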

int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring)
{
	/* Right now all IPs have only one instance - multiple rings. */
	if (ip_instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
		return -EINVAL;
	}

	switch (ip_type) {
	default:
		DRM_ERROR("unknown ip type: %d\n", ip_type);
		return -EINVAL;
	case AMDGPU_HW_IP_GFX:
		if (ring < adev->gfx.num_gfx_rings) {
			*out_ring = &adev->gfx.gfx_ring[ring];
		} else {
			DRM_ERROR("only %d gfx rings are supported now\n",
				  adev->gfx.num_gfx_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_COMPUTE:
		if (ring < adev->gfx.num_compute_rings) {
			*out_ring = &adev->gfx.compute_ring[ring];
		} else {
			DRM_ERROR("only %d compute rings are supported now\n",
				  adev->gfx.num_compute_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_DMA:
		if (ring < adev->sdma.num_instances) {
			*out_ring = &adev->sdma.instance[ring].ring;
		} else {
			DRM_ERROR("only %d SDMA rings are supported\n",
				  adev->sdma.num_instances);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		if (ring < 2) {
			*out_ring = &adev->vce.ring[ring];
		} else {
			DRM_ERROR("only two VCE rings are supported\n");
			return -EINVAL;
		}
		break;
	}
	return 0;
}
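
/* For reference: amdgpu_cs_ib_fill() and amdgpu_cs_dependencies() below use
 * this helper to resolve the (ip_type, ip_instance, ring) triple of each IB
 * and dependency chunk, and amdgpu_cs_wait_ioctl() uses it for wait requests.
 */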
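
/* Note on the FENCE chunk handled here: it names a GEM BO and an offset into
 * it that the submission's fence sequence number is written back to, so user
 * space can poll that location for completion (see the "wrap the last IB with
 * user fence" step in amdgpu_cs_ib_fill() below). Userptr BOs are rejected.
 */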
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *fence_data)
{
	struct drm_gem_object *gobj;
	uint32_t handle;

	handle = fence_data->handle;
	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
				     fence_data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf.offset = fence_data->offset;

	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EINVAL;
	}

	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
	p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;

	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned size;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);

	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_bo_list;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_bo_list;
	}

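	/* cs->in.chunks (copied into chunk_array above) is an array of
	 * cs->in.num_chunks user pointers, each referencing a
	 * struct drm_amdgpu_cs_chunk that carries a chunk_id, a length in
	 * dwords and a pointer to the chunk payload; the payload is copied
	 * into kdata below so later stages only touch kernel memory.
	 */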
	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			p->num_ibs++;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!p->ibs) {
		ret = -ENOMEM;
		goto free_all_kdata;
	}

	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
put_bo_list:
	if (p->bo_list)
		amdgpu_bo_list_put(p->bo_list);
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	u64 real_vram_size = adev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&adev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *        __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

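	/* Worked example (illustrative numbers, not taken from any real board):
	 * with 4 GiB of VRAM and 1 GiB in use, half_vram is 2 GiB,
	 * half_free_vram is 1 GiB and the threshold is 512 MiB; with half of
	 * VRAM or more in use the threshold bottoms out at the 1 MB minimum,
	 * and with VRAM completely free it reaches 1/4 of VRAM as drawn above.
	 */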
	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

int amdgpu_cs_list_validate(struct amdgpu_device *adev,
			    struct amdgpu_vm *vm,
			    struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	struct amdgpu_bo *bo;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((lobj->allowed_domains & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			amdgpu_ttm_placement_from_domain(bo, domain);
			initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&adev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				return r;
			}
		}
		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
	}
	return 0;
}

static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_cs_buckets buckets;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	int i, r;

	if (p->bo_list) {
		need_mmap_lock = p->bo_list->has_userptr;
		amdgpu_cs_buckets_init(&buckets);
		for (i = 0; i < p->bo_list->num_entries; i++)
			amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
					      p->bo_list->array[i].priority);

		amdgpu_cs_buckets_get_list(&buckets, &p->validated);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf.bo)
		list_add(&p->uf_entry.tv.head, &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
	if (unlikely(r != 0))
		goto error_reserve;

	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);

	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);

error_validate:
	if (r) {
		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}

error_reserve:
	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
	struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
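
/* list_sort() treats a negative return value as "a sorts before b", so e.g. a
 * 4-page BO ends up ahead of a 64-page BO; see the comment in
 * amdgpu_cs_parser_fini() below for why the smallest buffers should go first.
 */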

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to back off the buffer reservation
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	unsigned i;

	if (!error) {
		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);

		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->ibs)
		for (i = 0; i < parser->num_ibs; i++)
			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
	kfree(parser->ibs);
	amdgpu_bo_unref(&parser->uf.bo);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
			if (r)
				return r;
		}
	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}

static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	int i, r;

	if (parser->num_ibs == 0)
		return 0;

	/* Only for UVD/VCE VM emulation */
	for (i = 0; i < parser->num_ibs; i++) {
		ring = parser->ibs[i].ring;
		if (ring->funcs->parse_cs) {
			r = amdgpu_ring_parse_cs(ring, parser, i);
			if (r)
				return r;
		}
	}

	r = amdgpu_bo_vm_update_pte(parser, vm);
	if (!r)
		amdgpu_cs_sync_rings(parser);

	return r;
}

static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

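		/* Two cases below: rings with a parse_cs callback (the
		 * UVD/VCE VM emulation mentioned above) get the IB contents
		 * copied from the mapped BO into a kernel-owned IB so they
		 * can be inspected, while all other rings execute the IB in
		 * place in the GPU VM at va_start.
		 */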
		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(ring, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			ib->gpu_addr = chunk_ib->va_start;
		}

		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		ib->ctx = parser->ctx;
		j++;
	}

	if (!parser->num_ibs)
		return 0;

	/* add GDS resources to first IB */
	if (parser->bo_list) {
		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
		struct amdgpu_ib *ib = &parser->ibs[0];

		if (gds) {
			ib->gds_base = amdgpu_bo_gpu_offset(gds);
			ib->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			ib->gws_base = amdgpu_bo_gpu_offset(gws);
			ib->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			ib->oa_base = amdgpu_bo_gpu_offset(oa);
			ib->oa_size = amdgpu_bo_size(oa);
		}
	}
	/* wrap the last IB with user fence */
	if (parser->uf.bo) {
		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];

		/* UVD & VCE fw doesn't support user fences */
		if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
		    ib->ring->type == AMDGPU_RING_TYPE_VCE)
			return -EINVAL;

		ib->user = &parser->uf;
	}

	return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_ib *ib;
	int i, j, r;

	if (!p->num_ibs)
		return 0;

	/* Add dependencies to first IB */
	ib = &p->ibs[0];
	for (i = 0; i < p->nchunks; ++i) {
		struct drm_amdgpu_cs_chunk_dep *deps;
		struct amdgpu_cs_chunk *chunk;
		unsigned num_deps;

		chunk = &p->chunks[i];

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
			continue;

		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
		num_deps = chunk->length_dw * 4 /
			sizeof(struct drm_amdgpu_cs_chunk_dep);

		for (j = 0; j < num_deps; ++j) {
			struct amdgpu_ring *ring;
			struct amdgpu_ctx *ctx;
			struct fence *fence;

			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
					       deps[j].ip_instance,
					       deps[j].ring, &ring);
			if (r)
				return r;

			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
			if (ctx == NULL)
				return -EINVAL;

			fence = amdgpu_ctx_get_fence(ctx, ring,
						     deps[j].handle);
			if (IS_ERR(fence)) {
				r = PTR_ERR(fence);
				amdgpu_ctx_put(ctx);
				return r;

			} else if (fence) {
				r = amdgpu_sync_fence(adev, &ib->sync, fence);
				fence_put(fence);
				amdgpu_ctx_put(ctx);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
	int i;

	if (job->ibs)
		for (i = 0; i < job->num_ibs; i++)
			amdgpu_ib_free(job->adev, &job->ibs[i]);
	kfree(job->ibs);
	if (job->uf.bo)
		amdgpu_bo_unref(&job->uf.bo);
	return 0;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		amdgpu_cs_parser_fini(&parser, r, false);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}
	r = amdgpu_cs_parser_relocs(&parser);
	if (r == -ENOMEM)
		DRM_ERROR("Not enough memory for command submission!\n");
	else if (r && r != -ERESTARTSYS)
		DRM_ERROR("Failed to process the buffer list %d!\n", r);
	else if (!r) {
		reserved_buffers = true;
		r = amdgpu_cs_ib_fill(adev, &parser);
	}

	if (!r) {
		r = amdgpu_cs_dependencies(adev, &parser);
		if (r)
			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
	}

	if (r)
		goto out;

	for (i = 0; i < parser.num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

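	/* Two submission paths: with the GPU scheduler enabled the IBs are
	 * wrapped in an amdgpu_job and pushed to the context's scheduler
	 * entity, otherwise they are handed to the ring directly via
	 * amdgpu_ib_schedule(). Either way cs->out.handle returns the fence
	 * sequence number that amdgpu_cs_wait_ioctl() expects.
	 */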
	if (amdgpu_enable_scheduler && parser.num_ibs) {
		struct amdgpu_ring *ring = parser.ibs->ring;
		struct amd_sched_fence *fence;
		struct amdgpu_job *job;

		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job) {
			r = -ENOMEM;
			goto out;
		}

		job->base.sched = &ring->sched;
		job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
		job->adev = parser.adev;
		job->owner = parser.filp;
		job->free_job = amdgpu_cs_free_job;

		job->ibs = parser.ibs;
		job->num_ibs = parser.num_ibs;
		parser.ibs = NULL;
		parser.num_ibs = 0;

		if (job->ibs[job->num_ibs - 1].user) {
			job->uf = parser.uf;
			job->ibs[job->num_ibs - 1].user = &job->uf;
			parser.uf.bo = NULL;
		}

		fence = amd_sched_fence_create(job->base.s_entity,
					       parser.filp);
		if (!fence) {
			r = -ENOMEM;
			amdgpu_cs_free_job(job);
			kfree(job);
			goto out;
		}
		job->base.s_fence = fence;
		parser.fence = fence_get(&fence->base);

		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
						      &fence->base);
		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;

		trace_amdgpu_cs_ioctl(job);
		amd_sched_entity_push_job(&job->base);

	} else {
		struct amdgpu_fence *fence;

		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
				       parser.filp);
		fence = parser.ibs[parser.num_ibs - 1].fence;
		parser.fence = fence_get(&fence->base);
		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
	}

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	r = amdgpu_cs_handle_lockup(adev, r);
	return r;
}

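/* A rough sketch of how user space pairs the two ioctls (illustrative only):
 * the handle returned in cs->out.handle by the CS ioctl above is passed back
 * as wait->in.handle below, together with the same ctx_id, ip_type,
 * ip_instance and ring that the submission used.
 */
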
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct fence *fence;
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = fence_wait_timeout(fence, true, timeout);
		fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_list_entry *reloc;
	struct amdgpu_bo_va_mapping *mapping;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(reloc, &parser->validated, tv.head) {
		if (!reloc->bo_va)
			continue;

		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = reloc->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = reloc->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}