drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
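
/*
 * In this file a fence is a dma_fence tied to a per-ring 32-bit sequence
 * number: amdgpu_fence_emit() allocates the next sequence number and asks
 * the ring to write it to the fence address once all prior work on that
 * ring has completed, and amdgpu_fence_process() later reads the value
 * back and signals every fence whose sequence number has been reached.
 * Processing is normally driven by the fence interrupt, with the
 * fallback_timer as a safety net in case an interrupt is missed.
 */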

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
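
/*
 * A minimal usage sketch (not part of the driver): a submission path would
 * typically emit its command buffers and then call amdgpu_fence_emit() so
 * the returned dma_fence tracks when that work completes, e.g.:
 *
 *	struct dma_fence *hwf;
 *	int r = amdgpu_fence_emit(ring, &hwf);
 *
 *	if (r)
 *		return r;
 *	// hand "hwf" to whoever must wait on the submission, then drop
 *	// the local reference with dma_fence_put(hwf)
 *
 * In this driver the real caller is the IB scheduling code, which also
 * handles ring locking and error paths.
 */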

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and signals every fence for which
 * the sequence number has been reached.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

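	/*
	 * Atomically advance last_seq to the value just read back from the
	 * fence address.  If another thread updated last_seq in the meantime
	 * the cmpxchg fails and we re-read, so no signalled sequence number
	 * is ever skipped.
	 */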
	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, passed as the timer callback argument
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

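	/*
	 * Fences on a ring signal in submission order, so waiting on the
	 * most recently emitted fence (sync_seq) is enough to know that
	 * every earlier fence on this ring has signalled as well.
	 */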
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
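
	/*
	 * sync_seq and last_seq are free-running 32-bit counters, so compute
	 * the difference in 64 bits to survive wrap-around.  For example with
	 * last_seq = 0xfffffff0 and sync_seq wrapped to 0x0000000a this yields
	 * 0x100000000 - 0xfffffff0 + 0xa = 0x1a, i.e. 26 fences emitted but
	 * not yet signalled.
	 */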
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 for success, error for failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

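	/*
	 * The fences array is a power-of-two ring of slots indexed by
	 * "seq & num_fences_mask"; it is twice the hardware queue depth so a
	 * slot is normally recycled only long after its fence has signalled.
	 * E.g. num_hw_submission = 256 gives 512 slots and a mask of 0x1ff.
	 */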
	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * Delayed workqueue cannot use it directly,
			 * so the scheduler will not use delayed workqueue if
			 * MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

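		/*
		 * Writing sync_seq to the fence address makes the hardware
		 * look fully caught up, so the next amdgpu_fence_process()
		 * signals every outstanding fence on this ring.
		 */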
		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}

void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
{
	if (ring)
		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held when signalling is
 * first enabled on the fence.  It arms the fallback timer so the fence
 * still gets signalled even if the fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback to free the fence
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset when the debugfs file is read.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}