/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/hash.h>
35 #include <linux/hashtable.h>
36 #include <linux/jhash.h>
37 #include <linux/vmalloc.h>
38 #include <net/pkt_cls.h>
42 #include "../nfp_app.h"
44 struct nfp_mask_id_table
{
45 struct hlist_node link
;
51 static int nfp_release_stats_entry(struct nfp_app
*app
, u32 stats_context_id
)
53 struct nfp_flower_priv
*priv
= app
->priv
;
54 struct circ_buf
*ring
;
56 ring
= &priv
->stats_ids
.free_list
;
57 /* Check if buffer is full. */
58 if (!CIRC_SPACE(ring
->head
, ring
->tail
, NFP_FL_STATS_ENTRY_RS
*
59 NFP_FL_STATS_ELEM_RS
-
60 NFP_FL_STATS_ELEM_RS
+ 1))
63 memcpy(&ring
->buf
[ring
->head
], &stats_context_id
, NFP_FL_STATS_ELEM_RS
);
64 ring
->head
= (ring
->head
+ NFP_FL_STATS_ELEM_RS
) %
65 (NFP_FL_STATS_ENTRY_RS
* NFP_FL_STATS_ELEM_RS
);
70 static int nfp_get_stats_entry(struct nfp_app
*app
, u32
*stats_context_id
)
72 struct nfp_flower_priv
*priv
= app
->priv
;
73 u32 freed_stats_id
, temp_stats_id
;
74 struct circ_buf
*ring
;
76 ring
= &priv
->stats_ids
.free_list
;
77 freed_stats_id
= NFP_FL_STATS_ENTRY_RS
;
78 /* Check for unallocated entries first. */
79 if (priv
->stats_ids
.init_unalloc
> 0) {
80 *stats_context_id
= priv
->stats_ids
.init_unalloc
- 1;
81 priv
->stats_ids
.init_unalloc
--;
85 /* Check if buffer is empty. */
86 if (ring
->head
== ring
->tail
) {
87 *stats_context_id
= freed_stats_id
;
91 memcpy(&temp_stats_id
, &ring
->buf
[ring
->tail
], NFP_FL_STATS_ELEM_RS
);
92 *stats_context_id
= temp_stats_id
;
93 memcpy(&ring
->buf
[ring
->tail
], &freed_stats_id
, NFP_FL_STATS_ELEM_RS
);
94 ring
->tail
= (ring
->tail
+ NFP_FL_STATS_ELEM_RS
) %
95 (NFP_FL_STATS_ENTRY_RS
* NFP_FL_STATS_ELEM_RS
);
100 /* Must be called with either RTNL or rcu_read_lock */
101 struct nfp_fl_payload
*
102 nfp_flower_search_fl_table(struct nfp_app
*app
, unsigned long tc_flower_cookie
)
104 struct nfp_flower_priv
*priv
= app
->priv
;
105 struct nfp_fl_payload
*flower_entry
;
107 hash_for_each_possible_rcu(priv
->flow_table
, flower_entry
, link
,
109 if (flower_entry
->tc_flower_cookie
== tc_flower_cookie
)
116 nfp_flower_update_stats(struct nfp_app
*app
, struct nfp_fl_stats_frame
*stats
)
118 struct nfp_fl_payload
*nfp_flow
;
119 unsigned long flower_cookie
;
121 flower_cookie
= be64_to_cpu(stats
->stats_cookie
);
124 nfp_flow
= nfp_flower_search_fl_table(app
, flower_cookie
);
126 goto exit_rcu_unlock
;
128 if (nfp_flow
->meta
.host_ctx_id
!= stats
->stats_con_id
)
129 goto exit_rcu_unlock
;
131 spin_lock(&nfp_flow
->lock
);
132 nfp_flow
->stats
.pkts
+= be32_to_cpu(stats
->pkt_count
);
133 nfp_flow
->stats
.bytes
+= be64_to_cpu(stats
->byte_count
);
134 nfp_flow
->stats
.used
= jiffies
;
135 spin_unlock(&nfp_flow
->lock
);
141 void nfp_flower_rx_flow_stats(struct nfp_app
*app
, struct sk_buff
*skb
)
143 unsigned int msg_len
= skb
->len
- NFP_FLOWER_CMSG_HLEN
;
144 struct nfp_fl_stats_frame
*stats_frame
;
148 msg
= nfp_flower_cmsg_get_data(skb
);
150 stats_frame
= (struct nfp_fl_stats_frame
*)msg
;
151 for (i
= 0; i
< msg_len
/ sizeof(*stats_frame
); i
++)
152 nfp_flower_update_stats(app
, stats_frame
+ i
);
155 static int nfp_release_mask_id(struct nfp_app
*app
, u8 mask_id
)
157 struct nfp_flower_priv
*priv
= app
->priv
;
158 struct circ_buf
*ring
;
159 struct timespec64 now
;
161 ring
= &priv
->mask_ids
.mask_id_free_list
;
162 /* Checking if buffer is full. */
163 if (CIRC_SPACE(ring
->head
, ring
->tail
, NFP_FLOWER_MASK_ENTRY_RS
) == 0)
166 memcpy(&ring
->buf
[ring
->head
], &mask_id
, NFP_FLOWER_MASK_ELEMENT_RS
);
167 ring
->head
= (ring
->head
+ NFP_FLOWER_MASK_ELEMENT_RS
) %
168 (NFP_FLOWER_MASK_ENTRY_RS
* NFP_FLOWER_MASK_ELEMENT_RS
);
170 getnstimeofday64(&now
);
171 priv
->mask_ids
.last_used
[mask_id
] = now
;
176 static int nfp_mask_alloc(struct nfp_app
*app
, u8
*mask_id
)
178 struct nfp_flower_priv
*priv
= app
->priv
;
179 struct timespec64 delta
, now
;
180 struct circ_buf
*ring
;
181 u8 temp_id
, freed_id
;
183 ring
= &priv
->mask_ids
.mask_id_free_list
;
184 freed_id
= NFP_FLOWER_MASK_ENTRY_RS
- 1;
185 /* Checking for unallocated entries first. */
186 if (priv
->mask_ids
.init_unallocated
> 0) {
187 *mask_id
= priv
->mask_ids
.init_unallocated
;
188 priv
->mask_ids
.init_unallocated
--;
192 /* Checking if buffer is empty. */
193 if (ring
->head
== ring
->tail
)
196 memcpy(&temp_id
, &ring
->buf
[ring
->tail
], NFP_FLOWER_MASK_ELEMENT_RS
);
199 getnstimeofday64(&now
);
200 delta
= timespec64_sub(now
, priv
->mask_ids
.last_used
[*mask_id
]);
202 if (timespec64_to_ns(&delta
) < NFP_FL_MASK_REUSE_TIME_NS
)
205 memcpy(&ring
->buf
[ring
->tail
], &freed_id
, NFP_FLOWER_MASK_ELEMENT_RS
);
206 ring
->tail
= (ring
->tail
+ NFP_FLOWER_MASK_ELEMENT_RS
) %
207 (NFP_FLOWER_MASK_ENTRY_RS
* NFP_FLOWER_MASK_ELEMENT_RS
);
217 nfp_add_mask_table(struct nfp_app
*app
, char *mask_data
, u32 mask_len
)
219 struct nfp_flower_priv
*priv
= app
->priv
;
220 struct nfp_mask_id_table
*mask_entry
;
221 unsigned long hash_key
;
224 if (nfp_mask_alloc(app
, &mask_id
))
227 mask_entry
= kmalloc(sizeof(*mask_entry
), GFP_KERNEL
);
229 nfp_release_mask_id(app
, mask_id
);
233 INIT_HLIST_NODE(&mask_entry
->link
);
234 mask_entry
->mask_id
= mask_id
;
235 hash_key
= jhash(mask_data
, mask_len
, priv
->mask_id_seed
);
236 mask_entry
->hash_key
= hash_key
;
237 mask_entry
->ref_cnt
= 1;
238 hash_add(priv
->mask_table
, &mask_entry
->link
, hash_key
);
243 static struct nfp_mask_id_table
*
244 nfp_search_mask_table(struct nfp_app
*app
, char *mask_data
, u32 mask_len
)
246 struct nfp_flower_priv
*priv
= app
->priv
;
247 struct nfp_mask_id_table
*mask_entry
;
248 unsigned long hash_key
;
250 hash_key
= jhash(mask_data
, mask_len
, priv
->mask_id_seed
);
252 hash_for_each_possible(priv
->mask_table
, mask_entry
, link
, hash_key
)
253 if (mask_entry
->hash_key
== hash_key
)
260 nfp_find_in_mask_table(struct nfp_app
*app
, char *mask_data
, u32 mask_len
)
262 struct nfp_mask_id_table
*mask_entry
;
264 mask_entry
= nfp_search_mask_table(app
, mask_data
, mask_len
);
268 mask_entry
->ref_cnt
++;
270 /* Casting u8 to int for later use. */
271 return mask_entry
->mask_id
;
275 nfp_check_mask_add(struct nfp_app
*app
, char *mask_data
, u32 mask_len
,
276 u8
*meta_flags
, u8
*mask_id
)
280 id
= nfp_find_in_mask_table(app
, mask_data
, mask_len
);
282 id
= nfp_add_mask_table(app
, mask_data
, mask_len
);
285 *meta_flags
|= NFP_FL_META_FLAG_NEW_MASK
;
293 nfp_check_mask_remove(struct nfp_app
*app
, char *mask_data
, u32 mask_len
,
294 u8
*meta_flags
, u8
*mask_id
)
296 struct nfp_mask_id_table
*mask_entry
;
298 mask_entry
= nfp_search_mask_table(app
, mask_data
, mask_len
);
302 *mask_id
= mask_entry
->mask_id
;
303 mask_entry
->ref_cnt
--;
304 if (!mask_entry
->ref_cnt
) {
305 hash_del(&mask_entry
->link
);
306 nfp_release_mask_id(app
, *mask_id
);
309 *meta_flags
|= NFP_FL_META_FLAG_LAST_MASK
;
315 int nfp_compile_flow_metadata(struct nfp_app
*app
,
316 struct tc_cls_flower_offload
*flow
,
317 struct nfp_fl_payload
*nfp_flow
)
319 struct nfp_flower_priv
*priv
= app
->priv
;
320 struct nfp_fl_payload
*check_entry
;
324 if (nfp_get_stats_entry(app
, &stats_cxt
))
327 nfp_flow
->meta
.host_ctx_id
= cpu_to_be32(stats_cxt
);
328 nfp_flow
->meta
.host_cookie
= cpu_to_be64(flow
->cookie
);
331 if (!nfp_check_mask_add(app
, nfp_flow
->mask_data
,
332 nfp_flow
->meta
.mask_len
,
333 &nfp_flow
->meta
.flags
, &new_mask_id
)) {
334 if (nfp_release_stats_entry(app
, stats_cxt
))
339 nfp_flow
->meta
.flow_version
= cpu_to_be64(priv
->flower_version
);
340 priv
->flower_version
++;
342 /* Update flow payload with mask ids. */
343 nfp_flow
->unmasked_data
[NFP_FL_MASK_ID_LOCATION
] = new_mask_id
;
344 nfp_flow
->stats
.pkts
= 0;
345 nfp_flow
->stats
.bytes
= 0;
346 nfp_flow
->stats
.used
= jiffies
;
348 check_entry
= nfp_flower_search_fl_table(app
, flow
->cookie
);
350 if (nfp_release_stats_entry(app
, stats_cxt
))
353 if (!nfp_check_mask_remove(app
, nfp_flow
->mask_data
,
354 nfp_flow
->meta
.mask_len
,
364 int nfp_modify_flow_metadata(struct nfp_app
*app
,
365 struct nfp_fl_payload
*nfp_flow
)
367 struct nfp_flower_priv
*priv
= app
->priv
;
371 nfp_check_mask_remove(app
, nfp_flow
->mask_data
,
372 nfp_flow
->meta
.mask_len
, &nfp_flow
->meta
.flags
,
375 nfp_flow
->meta
.flow_version
= cpu_to_be64(priv
->flower_version
);
376 priv
->flower_version
++;
378 /* Update flow payload with mask ids. */
379 nfp_flow
->unmasked_data
[NFP_FL_MASK_ID_LOCATION
] = new_mask_id
;
381 /* Release the stats ctx id. */
382 temp_ctx_id
= be32_to_cpu(nfp_flow
->meta
.host_ctx_id
);
384 return nfp_release_stats_entry(app
, temp_ctx_id
);
387 int nfp_flower_metadata_init(struct nfp_app
*app
)
389 struct nfp_flower_priv
*priv
= app
->priv
;
391 hash_init(priv
->mask_table
);
392 hash_init(priv
->flow_table
);
393 get_random_bytes(&priv
->mask_id_seed
, sizeof(priv
->mask_id_seed
));
395 /* Init ring buffer and unallocated mask_ids. */
396 priv
->mask_ids
.mask_id_free_list
.buf
=
397 kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS
,
398 NFP_FLOWER_MASK_ELEMENT_RS
, GFP_KERNEL
);
399 if (!priv
->mask_ids
.mask_id_free_list
.buf
)
402 priv
->mask_ids
.init_unallocated
= NFP_FLOWER_MASK_ENTRY_RS
- 1;
404 /* Init timestamps for mask id*/
405 priv
->mask_ids
.last_used
=
406 kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS
,
407 sizeof(*priv
->mask_ids
.last_used
), GFP_KERNEL
);
408 if (!priv
->mask_ids
.last_used
)
409 goto err_free_mask_id
;
411 /* Init ring buffer and unallocated stats_ids. */
412 priv
->stats_ids
.free_list
.buf
=
413 vmalloc(NFP_FL_STATS_ENTRY_RS
* NFP_FL_STATS_ELEM_RS
);
414 if (!priv
->stats_ids
.free_list
.buf
)
415 goto err_free_last_used
;
417 priv
->stats_ids
.init_unalloc
= NFP_FL_REPEATED_HASH_MAX
;
422 kfree(priv
->mask_ids
.last_used
);
424 kfree(priv
->mask_ids
.mask_id_free_list
.buf
);
428 void nfp_flower_metadata_cleanup(struct nfp_app
*app
)
430 struct nfp_flower_priv
*priv
= app
->priv
;
435 kfree(priv
->mask_ids
.mask_id_free_list
.buf
);
436 kfree(priv
->mask_ids
.last_used
);
437 vfree(priv
->stats_ids
.free_list
.buf
);