/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_chunk.h"

enum xstats_type {
    /* common stats */
    rx,
    tx,
    dropped,
    inflight,
    calls,
    credits,
    /* device instance specific */
    no_iq_enq,
    no_cq_enq,
    /* port specific */
    rx_used,
    rx_free,
    tx_used,
    tx_free,
    pkt_cycles,
    poll_return, /* for zero-count and used also for port bucket loop */
    /* qid specific */
    iq_used,
    /* qid-port mapping specific */
    pinned,
    pkts, /* note: qid-to-port pkts */
};
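
/*
 * The same set of type tags is shared by all object classes; each getter
 * below (get_dev_stat(), get_port_stat(), get_qid_stat(), etc.) handles
 * only the subset that makes sense for its object and returns
 * (uint64_t)-1 for anything else.
 */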

typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
        uint16_t obj_idx, /* port or queue id */
        enum xstats_type stat, int extra_arg);

struct sw_xstats_entry {
    struct rte_event_dev_xstats_name name;
    xstats_fn fn;
    uint16_t obj_idx;
    enum xstats_type stat;
    enum rte_event_dev_xstats_mode mode;
    int extra_arg;
    uint8_t reset_allowed; /* when set, this value can be reset */
    uint64_t reset_value; /* an offset to be taken away to emulate resets */
};
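
/*
 * Reads are always reported relative to reset_value:
 *
 *     reported = fn(sw, obj_idx, stat, extra_arg) - reset_value;
 *
 * A reset simply folds the current reading into reset_value, so if a
 * counter has reached 100 when it is reset, a later raw value of 150 is
 * reported as 50. Resetting never clears the underlying counters.
 */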

static uint64_t
get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
        enum xstats_type type, int extra_arg __rte_unused)
{
    switch (type) {
    case rx: return sw->stats.rx_pkts;
    case tx: return sw->stats.tx_pkts;
    case dropped: return sw->stats.rx_dropped;
    case calls: return sw->sched_called;
    case no_iq_enq: return sw->sched_no_iq_enqueues;
    case no_cq_enq: return sw->sched_no_cq_enqueues;
    default: return -1;
    }
}

static uint64_t
get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
        enum xstats_type type, int extra_arg __rte_unused)
{
    const struct sw_port *p = &sw->ports[obj_idx];

    switch (type) {
    case rx: return p->stats.rx_pkts;
    case tx: return p->stats.tx_pkts;
    case dropped: return p->stats.rx_dropped;
    case inflight: return p->inflights;
    case pkt_cycles: return p->avg_pkt_ticks;
    case calls: return p->total_polls;
    case credits: return p->inflight_credits;
    case poll_return: return p->zero_polls;
    case rx_used: return rte_event_ring_count(p->rx_worker_ring);
    case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
    case tx_used: return rte_event_ring_count(p->cq_worker_ring);
    case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
    default: return -1;
    }
}

static uint64_t
get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
        enum xstats_type type, int extra_arg)
{
    const struct sw_port *p = &sw->ports[obj_idx];

    switch (type) {
    case poll_return: return p->poll_buckets[extra_arg];
    default: return -1;
    }
}

static uint64_t
get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
        enum xstats_type type, int extra_arg __rte_unused)
{
    const struct sw_qid *qid = &sw->qids[obj_idx];

    switch (type) {
    case rx: return qid->stats.rx_pkts;
    case tx: return qid->stats.tx_pkts;
    case dropped: return qid->stats.rx_dropped;
    case inflight:
        do {
            uint64_t infl = 0;
            unsigned int i;
            for (i = 0; i < RTE_DIM(qid->fids); i++)
                infl += qid->fids[i].pcount;
            return infl;
        } while (0);
        break;
    default: return -1;
    }
}

static uint64_t
get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
        enum xstats_type type, int extra_arg)
{
    const struct sw_qid *qid = &sw->qids[obj_idx];
    const int iq_idx = extra_arg;

    switch (type) {
    case iq_used: return iq_count(&qid->iq[iq_idx]);
    default: return -1;
    }
}

static uint64_t
get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
        enum xstats_type type, int extra_arg)
{
    const struct sw_qid *qid = &sw->qids[obj_idx];
    uint16_t port = extra_arg;

    switch (type) {
    case pinned:
        do {
            uint64_t pin = 0;
            unsigned int i;
            for (i = 0; i < RTE_DIM(qid->fids); i++)
                if (qid->fids[i].cq == port)
                    pin++;
            return pin;
        } while (0);
        break;
    case pkts:
        return qid->to_port[port];
    default: return -1;
    }
}

int
sw_xstats_init(struct sw_evdev *sw)
{
    /*
     * Define the stats names and types used to build up the device
     * xstats array.
     * There are multiple sets of stats:
     *  - device-level,
     *  - per-port,
     *  - per-port-dequeue-burst-sizes,
     *  - per-qid,
     *  - per-iq,
     *  - per-port-per-qid.
     *
     * For each of these sets there are up to three parallel arrays: one
     * for the names, one for the stat-type parameter passed to the fn
     * call that retrieves the stat, and one that says whether the stat
     * may be reset. All of these arrays must be kept in sync.
     */
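    /*
     * The resulting xstat names follow the formats used in the
     * snprintf() calls below, e.g. "dev_rx", "port_0_tx",
     * "port_0_dequeues_returning_1-8" (one name per dequeue-burst-size
     * bucket; the exact ranges depend on SW_DEQ_STAT_BUCKET_SHIFT),
     * "qid_0_iq_1_used" and "qid_0_port_1_pinned_flows".
     */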
    static const char * const dev_stats[] = { "rx", "tx", "drop",
        "sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
    };
    static const enum xstats_type dev_types[] = { rx, tx, dropped,
        calls, no_iq_enq, no_cq_enq,
    };
    /* all device stats are allowed to be reset */

    static const char * const port_stats[] = {"rx", "tx", "drop",
        "inflight", "avg_pkt_cycles", "credits",
        "rx_ring_used", "rx_ring_free",
        "cq_ring_used", "cq_ring_free",
        "dequeue_calls", "dequeues_returning_0",
    };
    static const enum xstats_type port_types[] = { rx, tx, dropped,
        inflight, pkt_cycles, credits,
        rx_used, rx_free, tx_used, tx_free,
        calls, poll_return,
    };
    static const uint8_t port_reset_allowed[] = {1, 1, 1,
        0, 1, 0,
        0, 0, 0, 0,
        1, 1,
    };

    static const char * const port_bucket_stats[] = {
        "dequeues_returning" };
    static const enum xstats_type port_bucket_types[] = { poll_return };
    /* all bucket dequeues are allowed to be reset, handled in loop below */

    static const char * const qid_stats[] = {"rx", "tx", "drop",
        "inflight"
    };
    static const enum xstats_type qid_types[] = { rx, tx, dropped,
        inflight
    };
    static const uint8_t qid_reset_allowed[] = {1, 1, 1,
        0
    };

    static const char * const qid_iq_stats[] = { "used" };
    static const enum xstats_type qid_iq_types[] = { iq_used };
    /* iq stats cannot be reset: reset_allowed is hard-coded to 0 below */

    static const char * const qid_port_stats[] = { "pinned_flows",
        "packets"
    };
    static const enum xstats_type qid_port_types[] = { pinned, pkts };
    static const uint8_t qid_port_reset_allowed[] = {0, 1};
    /* ---- end of stat definitions ---- */

    /* check sizes, since a missed comma can lead to strings being
     * joined by the compiler.
     */
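    /*
     * For example, dropping a comma in { "rx", "tx", "drop" } would
     * silently produce the two-element array { "rx", "txdrop" } through
     * string-literal concatenation, leaving the names and types arrays
     * with different lengths.
     */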
    RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
    RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
    RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
    RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
    RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
    RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
            RTE_DIM(port_bucket_types));

    RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
    RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));

    /* other vars */
    const uint32_t cons_bkt_shift =
        (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
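    /*
     * Despite the name, cons_bkt_shift is not a shift amount:
     * (cons_bkt_shift + 1) is the maximum number of dequeue burst-size
     * buckets any port can have, assuming cq ring capacity never exceeds
     * MAX_SW_CONS_Q_DEPTH. The count computed below is therefore an
     * upper bound; the allocation may hold more entries than are finally
     * written.
     */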
    const unsigned int count = RTE_DIM(dev_stats) +
            sw->port_count * RTE_DIM(port_stats) +
            sw->port_count * RTE_DIM(port_bucket_stats) *
                (cons_bkt_shift + 1) +
            sw->qid_count * RTE_DIM(qid_stats) +
            sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
            sw->qid_count * sw->port_count *
                RTE_DIM(qid_port_stats);
    unsigned int i, port, qid, iq, bkt, stat = 0;

    sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
            sw->data->socket_id);
    if (sw->xstats == NULL)
        return -ENOMEM;

#define sname sw->xstats[stat].name.name
    for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
        sw->xstats[stat] = (struct sw_xstats_entry){
            .fn = get_dev_stat,
            .stat = dev_types[i],
            .mode = RTE_EVENT_DEV_XSTATS_DEVICE,
            .reset_allowed = 1,
        };
        snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
    }
    sw->xstats_count_mode_dev = stat;

    for (port = 0; port < sw->port_count; port++) {
        sw->xstats_offset_for_port[port] = stat;

        uint32_t count_offset = stat;

        for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
            sw->xstats[stat] = (struct sw_xstats_entry){
                .fn = get_port_stat,
                .obj_idx = port,
                .stat = port_types[i],
                .mode = RTE_EVENT_DEV_XSTATS_PORT,
                .reset_allowed = port_reset_allowed[i],
            };
            snprintf(sname, sizeof(sname), "port_%u_%s",
                    port, port_stats[i]);
        }

        for (bkt = 0; bkt < (rte_event_ring_get_capacity(
                sw->ports[port].cq_worker_ring) >>
                    SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
            for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
                sw->xstats[stat] = (struct sw_xstats_entry){
                    .fn = get_port_bucket_stat,
                    .obj_idx = port,
                    .stat = port_bucket_types[i],
                    .mode = RTE_EVENT_DEV_XSTATS_PORT,
                    .extra_arg = bkt,
                    .reset_allowed = 1,
                };
                snprintf(sname, sizeof(sname),
                        "port_%u_%s_%u-%u",
                        port, port_bucket_stats[i],
                        (bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
                        (bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
                stat++;
            }
        }
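        /*
         * Each bucket b above is published as
         * "port_<p>_dequeues_returning_<b*2^S+1>-<(b+1)*2^S>", where S
         * is SW_DEQ_STAT_BUCKET_SHIFT; with S == 3, for instance,
         * bucket 0 covers dequeue bursts of 1-8 events. Zero-sized
         * dequeues are counted separately by the port-level
         * "dequeues_returning_0" stat.
         */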

        sw->xstats_count_per_port[port] = stat - count_offset;
    }

    sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;

    for (qid = 0; qid < sw->qid_count; qid++) {
        uint32_t count_offset = stat;
        sw->xstats_offset_for_qid[qid] = stat;

        for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
            sw->xstats[stat] = (struct sw_xstats_entry){
                .fn = get_qid_stat,
                .obj_idx = qid,
                .stat = qid_types[i],
                .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
                .reset_allowed = qid_reset_allowed[i],
            };
            snprintf(sname, sizeof(sname), "qid_%u_%s",
                    qid, qid_stats[i]);
        }
        for (iq = 0; iq < SW_IQS_MAX; iq++)
            for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
                sw->xstats[stat] = (struct sw_xstats_entry){
                    .fn = get_qid_iq_stat,
                    .obj_idx = qid,
                    .stat = qid_iq_types[i],
                    .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
                    .extra_arg = iq,
                    .reset_allowed = 0,
                };
                snprintf(sname, sizeof(sname),
                        "qid_%u_iq_%u_%s",
                        qid, iq,
                        qid_iq_stats[i]);
            }

        for (port = 0; port < sw->port_count; port++)
            for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
                sw->xstats[stat] = (struct sw_xstats_entry){
                    .fn = get_qid_port_stat,
                    .obj_idx = qid,
                    .stat = qid_port_types[i],
                    .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
                    .extra_arg = port,
                    .reset_allowed =
                        qid_port_reset_allowed[i],
                };
                snprintf(sname, sizeof(sname),
                        "qid_%u_port_%u_%s",
                        qid, port,
                        qid_port_stats[i]);
            }

        sw->xstats_count_per_qid[qid] = stat - count_offset;
    }

    sw->xstats_count_mode_queue = stat -
        (sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
#undef sname

    sw->xstats_count = stat;

    return stat;
}
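
/*
 * Usage sketch (illustrative, not part of the driver): applications reach
 * these stats through the public eventdev API rather than through the
 * sw_*() hooks directly. dev_id, n, names, ids and values below are
 * hypothetical, and error handling is omitted:
 *
 *     unsigned int n = rte_event_dev_xstats_names_get(dev_id,
 *             RTE_EVENT_DEV_XSTATS_PORT, 0, NULL, NULL, 0);
 *     struct rte_event_dev_xstats_name *names =
 *             malloc(n * sizeof(*names));
 *     unsigned int *ids = malloc(n * sizeof(*ids));
 *     uint64_t *values = malloc(n * sizeof(*values));
 *     rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_PORT,
 *             0, names, ids, n);
 *     rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_PORT, 0,
 *             ids, values, n);
 */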

int
sw_xstats_uninit(struct sw_evdev *sw)
{
    rte_free(sw->xstats);
    sw->xstats_count = 0;
    return 0;
}

int
sw_xstats_get_names(const struct rte_eventdev *dev,
        enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
        struct rte_event_dev_xstats_name *xstats_names,
        unsigned int *ids, unsigned int size)
{
    const struct sw_evdev *sw = sw_pmd_priv_const(dev);
    unsigned int i;
    unsigned int xidx = 0;

    uint32_t xstats_mode_count = 0;
    uint32_t start_offset = 0;

    switch (mode) {
    case RTE_EVENT_DEV_XSTATS_DEVICE:
        xstats_mode_count = sw->xstats_count_mode_dev;
        break;
    case RTE_EVENT_DEV_XSTATS_PORT:
        if (queue_port_id >= (signed int)sw->port_count)
            break;
        xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
        start_offset = sw->xstats_offset_for_port[queue_port_id];
        break;
    case RTE_EVENT_DEV_XSTATS_QUEUE:
        if (queue_port_id >= (signed int)sw->qid_count)
            break;
        xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
        start_offset = sw->xstats_offset_for_qid[queue_port_id];
        break;
    default:
        SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
        return -EINVAL;
    }

    if (xstats_mode_count > size || !ids || !xstats_names)
        return xstats_mode_count;

    for (i = 0; i < sw->xstats_count && xidx < size; i++) {
        if (sw->xstats[i].mode != mode)
            continue;

        if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
                queue_port_id != sw->xstats[i].obj_idx)
            continue;

        xstats_names[xidx] = sw->xstats[i].name;
        ids[xidx] = start_offset + xidx;
        xidx++;
    }
    return xidx;
}

static int
sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
        uint8_t queue_port_id, const unsigned int ids[],
        uint64_t values[], unsigned int n, const uint32_t reset,
        const uint32_t ret_if_n_lt_nstats)
{
    unsigned int i;
    unsigned int xidx = 0;

    uint32_t xstats_mode_count = 0;

    switch (mode) {
    case RTE_EVENT_DEV_XSTATS_DEVICE:
        xstats_mode_count = sw->xstats_count_mode_dev;
        break;
    case RTE_EVENT_DEV_XSTATS_PORT:
        if (queue_port_id >= (signed int)sw->port_count)
            goto invalid_value;
        xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
        break;
    case RTE_EVENT_DEV_XSTATS_QUEUE:
        if (queue_port_id >= (signed int)sw->qid_count)
            goto invalid_value;
        xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
        break;
    default:
        SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
        goto invalid_value;
    }

    /* This function either checks the stat count and returns it when n
     * is too small (xstats_get()-style behaviour), or ignores n so that
     * a single stat can be reset (reset()-style behaviour).
     */
    if (ret_if_n_lt_nstats && xstats_mode_count > n)
        return xstats_mode_count;

    for (i = 0; i < n && xidx < xstats_mode_count; i++) {
        struct sw_xstats_entry *xs;

        /* bounds-check the id before dereferencing the entry */
        if (ids[i] >= sw->xstats_count)
            continue;

        xs = &sw->xstats[ids[i]];
        if (xs->mode != mode)
            continue;

        if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
                queue_port_id != xs->obj_idx)
            continue;

        uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
            - xs->reset_value;

        if (values)
            values[xidx] = val;

        if (xs->reset_allowed && reset)
            xs->reset_value += val;

        xidx++;
    }

    return xidx;
invalid_value:
    return -EINVAL;
}
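
/*
 * Every caller in this file passes ret_if_n_lt_nstats == 0:
 * sw_xstats_get() below reads without resetting (reset == 0), while the
 * reset helpers further down pass reset == 1 and values == NULL so that
 * matching stats are folded into reset_value without being reported.
 */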

int
sw_xstats_get(const struct rte_eventdev *dev,
        enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
        const unsigned int ids[], uint64_t values[], unsigned int n)
{
    struct sw_evdev *sw = sw_pmd_priv(dev);
    const uint32_t reset = 0;
    const uint32_t ret_n_lt_stats = 0;
    return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
            reset, ret_n_lt_stats);
}

uint64_t
sw_xstats_get_by_name(const struct rte_eventdev *dev,
        const char *name, unsigned int *id)
{
    const struct sw_evdev *sw = sw_pmd_priv_const(dev);
    unsigned int i;

    for (i = 0; i < sw->xstats_count; i++) {
        struct sw_xstats_entry *xs = &sw->xstats[i];
        if (strncmp(xs->name.name, name,
                RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
            if (id != NULL)
                *id = i;
            return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
                - xs->reset_value;
        }
    }
    if (id != NULL)
        *id = (uint32_t)-1;
    return (uint64_t)-1;
}
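
/*
 * Note that the (uint64_t)-1 "not found" sentinel is ambiguous: a stat
 * whose value is genuinely UINT64_MAX is indistinguishable from a missing
 * name. Callers that need to tell the two apart should check *id, which
 * is set to (uint32_t)-1 only when no name matched.
 */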

static void
sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
{
    uint32_t i;
    for (i = start; i < start + num; i++) {
        struct sw_xstats_entry *xs = &sw->xstats[i];
        if (!xs->reset_allowed)
            continue;

        uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg);
        xs->reset_value = val;
    }
}

static int
sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
        const uint32_t ids[], uint32_t nb_ids)
{
    const uint32_t reset = 1;
    const uint32_t ret_n_lt_stats = 0;
    if (ids) {
        uint32_t nb_reset = sw_xstats_update(sw,
                RTE_EVENT_DEV_XSTATS_QUEUE,
                queue_id, ids, NULL, nb_ids,
                reset, ret_n_lt_stats);
        return nb_reset == nb_ids ? 0 : -EINVAL;
    }

    sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
            sw->xstats_count_per_qid[queue_id]);

    return 0;
}

static int
sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
        const uint32_t ids[], uint32_t nb_ids)
{
    const uint32_t reset = 1;
    const uint32_t ret_n_lt_stats = 0;
    int offset = sw->xstats_offset_for_port[port_id];
    int nb_stat = sw->xstats_count_per_port[port_id];

    if (ids) {
        uint32_t nb_reset = sw_xstats_update(sw,
                RTE_EVENT_DEV_XSTATS_PORT, port_id,
                ids, NULL, nb_ids,
                reset, ret_n_lt_stats);
        return nb_reset == nb_ids ? 0 : -EINVAL;
    }

    sw_xstats_reset_range(sw, offset, nb_stat);
    return 0;
}

static int
sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
{
    uint32_t i;
    if (ids) {
        for (i = 0; i < nb_ids; i++) {
            uint32_t id = ids[i];
            if (id >= sw->xstats_count_mode_dev)
                return -EINVAL;
            sw_xstats_reset_range(sw, id, 1);
        }
    } else {
        for (i = 0; i < sw->xstats_count_mode_dev; i++)
            sw_xstats_reset_range(sw, i, 1);
    }

    return 0;
}

int
sw_xstats_reset(struct rte_eventdev *dev,
        enum rte_event_dev_xstats_mode mode,
        int16_t queue_port_id,
        const uint32_t ids[],
        uint32_t nb_ids)
{
    struct sw_evdev *sw = sw_pmd_priv(dev);
    uint32_t i, err;

    /* handle -1 for queue_port_id here, looping over all ports/queues */
    switch (mode) {
    case RTE_EVENT_DEV_XSTATS_DEVICE:
        sw_xstats_reset_dev(sw, ids, nb_ids);
        break;
    case RTE_EVENT_DEV_XSTATS_PORT:
        if (queue_port_id == -1) {
            for (i = 0; i < sw->port_count; i++) {
                err = sw_xstats_reset_port(sw, i, ids, nb_ids);
                if (err)
                    return -EINVAL;
            }
        } else if (queue_port_id < (int16_t)sw->port_count)
            sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
        break;
    case RTE_EVENT_DEV_XSTATS_QUEUE:
        if (queue_port_id == -1) {
            for (i = 0; i < sw->qid_count; i++) {
                err = sw_xstats_reset_queue(sw, i, ids, nb_ids);
                if (err)
                    return -EINVAL;
            }
        } else if (queue_port_id < (int16_t)sw->qid_count)
            sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
        break;
    }

    return 0;
}