/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC rte_jhash
#endif

#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX

static void copy_fltr_v1(struct filter_v2 *fltr,
			 const struct rte_eth_fdir_input *input,
			 const struct rte_eth_fdir_masks *masks);
static void copy_fltr_v2(struct filter_v2 *fltr,
			 const struct rte_eth_fdir_input *input,
			 const struct rte_eth_fdir_masks *masks);

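/* Copy the accumulated flow director statistics to the caller. */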
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
	*stats = enic->fdir.stats;
}

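/* Report the flow director mode and the mask of supported flow types. */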
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
	info->mode = (enum rte_fdir_mode)enic->fdir.modes;
	info->flow_types_mask[0] = enic->fdir.types_mask;
}

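/* Initialize flow director capabilities: perfect-match mode, the flow
 * types this adapter can match, and the function used to translate DPDK
 * filter input into a VIC filter. Generic (v2) filters require advanced
 * filter support in the adapter.
 */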
void enic_fdir_info(struct enic *enic)
{
	enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT;
	enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
				1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (enic->adv_filters) {
		enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
		enic->fdir.copy_fltr_fn = copy_fltr_v2;
	} else {
		enic->fdir.copy_fltr_fn = copy_fltr_v1;
	}
}

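/* Record a match at one layer of a generic filter: mark the layer's
 * flag in the mask/value flag sets and copy the header mask and value
 * into that layer.
 */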
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
	       enum filter_generic_1_layer layer, void *mask, void *val,
	       unsigned int len)
{
	gp->mask_flags |= flag;
	gp->val_flags |= gp->mask_flags;
	memcpy(gp->layer[layer].mask, mask, len);
	memcpy(gp->layer[layer].val, val, len);
}

/* Copy Flow Director filter to a VIC IPv4 filter (for Cisco VICs
 * without advanced filter support).
 */
static void
copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     __rte_unused const struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
	/* The TCP and UDP flow structs overlay identically in the input
	 * union, so the UDP view of the ports works for both protocols.
	 */
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}

/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
static void
copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     const struct rte_eth_fdir_masks *masks)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;

	fltr->type = FILTER_DPDK_1;
	memset(gp, 0, sizeof(*gp));

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
		struct rte_udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp4_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp4_flow.src_port;
		}
		if (input->flow.udp4_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
		struct rte_tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp4_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp4_flow.src_port;
		}
		if (input->flow.tcp4_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
		struct rte_sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp4_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp4_flow.src_port;
		}
		if (input->flow.sctp4_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
		}
		if (input->flow.sctp4_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
		}

		/*
		 * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
		 * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
		 * manually set proto_id=sctp below.
		 */
		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct rte_sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
		struct rte_ipv4_hdr ip4_mask, ip4_val;
		memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr));
		memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr));

		if (input->flow.ip4_flow.tos) {
			ip4_mask.type_of_service = masks->ipv4_mask.tos;
			ip4_val.type_of_service = input->flow.ip4_flow.tos;
		}
		if (input->flow.ip4_flow.ttl) {
			ip4_mask.time_to_live = masks->ipv4_mask.ttl;
			ip4_val.time_to_live = input->flow.ip4_flow.ttl;
		}
		if (input->flow.ip4_flow.proto) {
			ip4_mask.next_proto_id = masks->ipv4_mask.proto;
			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
			/* Explicitly match the SCTP protocol number */
			ip4_mask.next_proto_id = 0xff;
			ip4_val.next_proto_id = IPPROTO_SCTP;
		}
		if (input->flow.ip4_flow.src_ip) {
			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
			ip4_val.src_addr = input->flow.ip4_flow.src_ip;
		}
		if (input->flow.ip4_flow.dst_ip) {
			ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
			ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
			       &ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
		struct rte_udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp6_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp6_flow.src_port;
		}
		if (input->flow.udp6_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
		struct rte_tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp6_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp6_flow.src_port;
		}
		if (input->flow.tcp6_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
		struct rte_sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp6_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp6_flow.src_port;
		}
		if (input->flow.sctp6_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
		}
		if (input->flow.sctp6_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
		}

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct rte_sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
		struct rte_ipv6_hdr ipv6_mask, ipv6_val;
		memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr));
		memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr));

		if (input->flow.ipv6_flow.proto) {
			ipv6_mask.proto = masks->ipv6_mask.proto;
			ipv6_val.proto = input->flow.ipv6_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
			/* See comments for IPv4 SCTP above. */
			ipv6_mask.proto = 0xff;
			ipv6_val.proto = IPPROTO_SCTP;
		}
		memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
		       sizeof(ipv6_mask.src_addr));
		memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
		       sizeof(ipv6_val.src_addr));
		memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
		       sizeof(ipv6_mask.dst_addr));
		memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
		       sizeof(ipv6_val.dst_addr));
		if (input->flow.ipv6_flow.tc) {
			ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
		}
		if (input->flow.ipv6_flow.hop_limits) {
			ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
			ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
			       &ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr));
	}
}

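/* Delete a flow director filter: remove it from the hash table, delete
 * the hardware classifier entry, and update the statistics.
 */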
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	int32_t pos;
	struct enic_fdir_node *key;
	/* See if the key is in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
	case -ENOENT:
		enic->fdir.stats.f_remove++;
		return -EINVAL;
	default:
		/* The entry is present in the table */
		key = enic->fdir.nodes[pos];

		/* Delete the filter */
		vnic_dev_classifier(enic->vdev, CLSF_DEL,
				    &key->fltr_id, NULL, NULL);
		rte_free(key);
		enic->fdir.nodes[pos] = NULL;
		enic->fdir.stats.free++;
		enic->fdir.stats.remove++;
		break;
	}
	return 0;
}

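/* Add (or update) a flow director filter steering matched packets to an
 * Rx queue. Features this adapter cannot match (VLAN, flexbytes, drop
 * action) are rejected with -ENOTSUP.
 */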
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter_v2 fltr;
	int32_t pos;
	uint8_t do_free = 0;
	uint16_t old_fltr_id = 0;
	uint32_t flowtype_supported;
	uint16_t flex_bytes;
	uint16_t queue;
	struct filter_action_v2 action;

	memset(&fltr, 0, sizeof(fltr));
	memset(&action, 0, sizeof(action));
	flowtype_supported = enic->fdir.types_mask
			     & (1 << params->input.flow_type);

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
	    (params->input.flow_ext.vlan_tci & 0xFFF) ||
	    !flowtype_supported || flex_bytes ||
	    params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	/* Get the enicpmd RQ from the DPDK Rx queue */
	queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

	if (!enic->rq[queue].in_use)
		return -EINVAL;

	/* See if the key is already in the table; this also removes it,
	 * so it is re-added below on every path that keeps the entry.
	 */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue.
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				    "FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add.
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later.
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
					    &key->fltr_id, NULL, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	enic->fdir.copy_fltr_fn(&fltr, &params->input,
				&enic->rte_dev->data->dev_conf.fdir_conf.mask);
	action.type = FILTER_ACTION_RQ_STEERING;
	action.rq_idx = queue;

	/* On success, CLSF_ADD returns the filter id through "queue" */
	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
				 &action)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
				    NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		enic->fdir.stats.f_add++;
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}

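/* Delete all classifier entries and free the flow director hash table. */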
void enic_clsf_destroy(struct enic *enic)
{
	uint32_t index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
					    &key->fltr_id, NULL, NULL);
			rte_free(key);
			enic->fdir.nodes[index] = NULL;
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}

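/* Create the flow director hash table, keyed by the entire
 * rte_eth_fdir_filter, and reset the statistics. Returns nonzero if the
 * hash table could not be created.
 */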
int enic_clsf_init(struct enic *enic)
{
	char clsf_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters hash_params = {
		.name = clsf_name,
		.entries = ENICPMD_CLSF_HASH_ENTRIES,
		.key_len = sizeof(struct rte_eth_fdir_filter),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};
	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
	enic->fdir.hash = rte_hash_create(&hash_params);
	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
	return enic->fdir.hash == NULL;
}
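
/*
 * Typical lifecycle, sketched from this file alone (the actual call
 * sites live elsewhere in the enic PMD): enic_clsf_init() at device
 * setup, enic_fdir_add_fltr()/enic_fdir_del_fltr() from the flow
 * director ops, and enic_clsf_destroy() at teardown.
 */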