/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>

#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"

#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS 32 /* assumed value; the original definition was elided */

#define DPAA2_TEST_RUN(setup, teardown, test) \
	dpaa2_test_run(setup, teardown, test, #test)

static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
	uint8_t seq;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

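/*
 * Locate the "event_dpaa2" eventdev; if it has not been probed yet, create it
 * as a vdev so that the self-test can run standalone.
 */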
static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_dpaa2";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			dpaa2_evdev_err("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			dpaa2_evdev_err("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

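/*
 * Derive a device configuration from the advertised limits so the test
 * exercises the device with its maximum queue, port and flow counts.
 */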
static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
			struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

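/*
 * Common test fixture: create the mbuf pool, configure the device from its
 * advertised limits, set up all queues (optionally with per-queue priorities),
 * set up and link all ports, then start the device.
 */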
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_dpaa2_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
			MAX_EVENTS,
			0 /*MBUF_CACHE_SIZE*/,
			0,
			512, /* Use very small mbufs */
			rte_socket_id());
	if (!eventdev_test_mempool) {
		dpaa2_evdev_err("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			dpaa2_evdev_err(
				"test expects the unique priority per queue");
			return -ENOTSUP;
		}

		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}

	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}

static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

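/*
 * Record the expected attributes at the start of the mbuf data area and fill
 * in the rte_event to be enqueued; the stored copy is compared against the
 * event seen on dequeue.
 */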
static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port, uint8_t seq)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;
	attr->seq = seq;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}

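/*
 * Allocate one mbuf per event and enqueue 'events' new events with the given
 * attributes through the given port.
 */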
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		update_event_and_validation_attr(m, &ev, flow_id, event_type,
				sub_event_type, sched_type, queue, port, i);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}

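/*
 * Poll the port a bounded number of times and fail if any stray event is still
 * delivered after a test believes the device has been drained.
 */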
static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d", ev.mbuf->seqn);
	}
	return 0;
}

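/*
 * Inject total_events events with randomised flow id, sub event type, schedule
 * type and destination queue, all enqueued through port 0.
 */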
static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}

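/*
 * Compare the attributes stashed in the mbuf at enqueue time against the
 * fields of the dequeued event.
 */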
static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq=%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq=%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq=%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq=%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq=%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

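/*
 * Dequeue up to total_events events from the given port, validating each one
 * and optionally applying a test-specific callback. A forward-progress counter
 * guards against the device stalling, which is reported as a deadlock.
 */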
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			dpaa2_evdev_err("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

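/* Check that the dequeue index matches the sequence number stored at enqueue */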
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);

	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, attr->seq,
			"index=%d != seqn=%d", index, attr->seq);
	return 0;
}

static int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,
			sched_type,
			0 /* queue */,
			0 /* port */,
			MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events, spread them across the available
 * queues, and on dequeue verify the enqueued events using a single event port
 * (port 0).
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

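/*
 * Worker loop: dequeue, validate and free events until the shared counter of
 * outstanding events reaches zero.
 */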
static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}

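/*
 * Wait for the last launched worker lcore to finish, logging progress roughly
 * once a second and reporting a deadlock if no completion is seen for about
 * ten seconds.
 */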
static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			dpaa2_evdev_dbg("\r%s: events %d", __func__,
					rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			dpaa2_evdev_err(
				"%s: No schedules for 10 seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();

	return 0;
}

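/*
 * Launch one worker per port: the first worker runs master_worker, the rest
 * run slave_workers. Each worker gets its own port and a pointer to the shared
 * count of outstanding events; the call returns once every worker has joined.
 */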
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
			int (*slave_workers)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret)
		return -1;

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip master */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);

	return ret;
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the enqueued
 * events.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
				__func__, nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
					worker_multi_port_fn, total_events,
					nr_ports, 0xff /* invalid */);
}

static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		*count = *count + 1;
}

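/*
 * Verify that a registered device stop flush callback is invoked once for
 * every event still inside the device when rte_event_dev_stop() is called.
 */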
static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	RTE_TEST_ASSERT_SUCCESS(ret,
			"Failed to register dev stop flush callback");

	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	RTE_TEST_ASSERT_SUCCESS(ret,
			"Failed to unregister dev stop flush callback");

	RTE_TEST_ASSERT_EQUAL(total_events, count,
			"count mismatch total_events=%d count=%d",
			total_events, count);

	return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by checking
 * queue_id == x on dequeue on the specific port x
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;
	uint32_t port_count;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify the events generated from correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq=%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1, then verify the link connection on dequeue
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&nr_queues), "Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link even-numbered queues to port 0 and odd-numbered queues to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}

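/*
 * Run one test case inside its setup/teardown pair and update the
 * total/passed/failed/unsupported counters accordingly.
 */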
static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		RTE_LOG(INFO, PMD, "Error setting up test %s\n", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			RTE_LOG(INFO, PMD, "%s Failed\n", name);
		} else {
			passed++;
			RTE_LOG(INFO, PMD, "%s Passed\n", name);
		}
	}

	total++;
	tdown();
}

int
test_eventdev_dpaa2(void)
{
	testsuite_setup();

	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_dev_stop_flush);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);

	DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
	DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
	DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
	DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);

	testsuite_teardown();

	return 0;
}