/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_tailq.h>

#include "rte_eth_bond_private.h"
16 static struct rte_flow
*
17 bond_flow_alloc(int numa_node
, const struct rte_flow_attr
*attr
,
18 const struct rte_flow_item
*items
,
19 const struct rte_flow_action
*actions
)
21 struct rte_flow
*flow
;
22 const struct rte_flow_conv_rule rule
= {
25 .actions_ro
= actions
,
27 struct rte_flow_error error
;
30 ret
= rte_flow_conv(RTE_FLOW_CONV_OP_RULE
, NULL
, 0, &rule
, &error
);
32 RTE_BOND_LOG(ERR
, "Unable to process flow rule (%s): %s",
33 error
.message
? error
.message
: "unspecified",
37 flow
= rte_zmalloc_socket(NULL
, offsetof(struct rte_flow
, rule
) + ret
,
38 RTE_CACHE_LINE_SIZE
, numa_node
);
39 if (unlikely(flow
== NULL
)) {
40 RTE_BOND_LOG(ERR
, "Could not allocate new flow");
43 ret
= rte_flow_conv(RTE_FLOW_CONV_OP_RULE
, &flow
->rule
, ret
, &rule
,
46 RTE_BOND_LOG(ERR
, "Failed to copy flow rule (%s): %s",
47 error
.message
? error
.message
: "unspecified",
56 bond_flow_release(struct rte_flow
**flow
)
63 bond_flow_validate(struct rte_eth_dev
*dev
, const struct rte_flow_attr
*attr
,
64 const struct rte_flow_item patterns
[],
65 const struct rte_flow_action actions
[],
66 struct rte_flow_error
*err
)
68 struct bond_dev_private
*internals
= dev
->data
->dev_private
;
72 for (i
= 0; i
< internals
->slave_count
; i
++) {
73 ret
= rte_flow_validate(internals
->slaves
[i
].port_id
, attr
,
74 patterns
, actions
, err
);
76 RTE_BOND_LOG(ERR
, "Operation rte_flow_validate failed"
77 " for slave %d with error %d", i
, ret
);
84 static struct rte_flow
*
85 bond_flow_create(struct rte_eth_dev
*dev
, const struct rte_flow_attr
*attr
,
86 const struct rte_flow_item patterns
[],
87 const struct rte_flow_action actions
[],
88 struct rte_flow_error
*err
)
90 struct bond_dev_private
*internals
= dev
->data
->dev_private
;
91 struct rte_flow
*flow
;
94 flow
= bond_flow_alloc(dev
->data
->numa_node
, attr
, patterns
, actions
);
95 if (unlikely(flow
== NULL
)) {
96 rte_flow_error_set(err
, ENOMEM
, RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
97 NULL
, rte_strerror(ENOMEM
));
100 for (i
= 0; i
< internals
->slave_count
; i
++) {
101 flow
->flows
[i
] = rte_flow_create(internals
->slaves
[i
].port_id
,
102 attr
, patterns
, actions
, err
);
103 if (unlikely(flow
->flows
[i
] == NULL
)) {
104 RTE_BOND_LOG(ERR
, "Failed to create flow on slave %d",
109 TAILQ_INSERT_TAIL(&internals
->flow_list
, flow
, next
);
112 /* Destroy all slaves flows. */
113 for (i
= 0; i
< internals
->slave_count
; i
++) {
114 if (flow
->flows
[i
] != NULL
)
115 rte_flow_destroy(internals
->slaves
[i
].port_id
,
116 flow
->flows
[i
], err
);
118 bond_flow_release(&flow
);
123 bond_flow_destroy(struct rte_eth_dev
*dev
, struct rte_flow
*flow
,
124 struct rte_flow_error
*err
)
126 struct bond_dev_private
*internals
= dev
->data
->dev_private
;
130 for (i
= 0; i
< internals
->slave_count
; i
++) {
133 if (unlikely(flow
->flows
[i
] == NULL
))
135 lret
= rte_flow_destroy(internals
->slaves
[i
].port_id
,
136 flow
->flows
[i
], err
);
137 if (unlikely(lret
!= 0)) {
138 RTE_BOND_LOG(ERR
, "Failed to destroy flow on slave %d:"
143 TAILQ_REMOVE(&internals
->flow_list
, flow
, next
);
144 bond_flow_release(&flow
);
149 bond_flow_flush(struct rte_eth_dev
*dev
, struct rte_flow_error
*err
)
151 struct bond_dev_private
*internals
= dev
->data
->dev_private
;
152 struct rte_flow
*flow
;
157 /* Destroy all bond flows from its slaves instead of flushing them to
158 * keep the LACP flow or any other external flows.
160 TAILQ_FOREACH_SAFE(flow
, &internals
->flow_list
, next
, tmp
) {
161 lret
= bond_flow_destroy(dev
, flow
, err
);
162 if (unlikely(lret
!= 0))
165 if (unlikely(ret
!= 0))
166 RTE_BOND_LOG(ERR
, "Failed to flush flow in all slaves");
171 bond_flow_query_count(struct rte_eth_dev
*dev
, struct rte_flow
*flow
,
172 const struct rte_flow_action
*action
,
173 struct rte_flow_query_count
*count
,
174 struct rte_flow_error
*err
)
176 struct bond_dev_private
*internals
= dev
->data
->dev_private
;
177 struct rte_flow_query_count slave_count
;
183 rte_memcpy(&slave_count
, count
, sizeof(slave_count
));
184 for (i
= 0; i
< internals
->slave_count
; i
++) {
185 ret
= rte_flow_query(internals
->slaves
[i
].port_id
,
186 flow
->flows
[i
], action
,
188 if (unlikely(ret
!= 0)) {
189 RTE_BOND_LOG(ERR
, "Failed to query flow on"
190 " slave %d: %d", i
, ret
);
193 count
->bytes
+= slave_count
.bytes
;
194 count
->hits
+= slave_count
.hits
;
195 slave_count
.bytes
= 0;
196 slave_count
.hits
= 0;
202 bond_flow_query(struct rte_eth_dev
*dev
, struct rte_flow
*flow
,
203 const struct rte_flow_action
*action
, void *arg
,
204 struct rte_flow_error
*err
)
206 switch (action
->type
) {
207 case RTE_FLOW_ACTION_TYPE_COUNT
:
208 return bond_flow_query_count(dev
, flow
, action
, arg
, err
);
210 return rte_flow_error_set(err
, ENOTSUP
,
211 RTE_FLOW_ERROR_TYPE_ACTION
, arg
,
212 rte_strerror(ENOTSUP
));
217 bond_flow_isolate(struct rte_eth_dev
*dev
, int set
,
218 struct rte_flow_error
*err
)
220 struct bond_dev_private
*internals
= dev
->data
->dev_private
;
224 for (i
= 0; i
< internals
->slave_count
; i
++) {
225 ret
= rte_flow_isolate(internals
->slaves
[i
].port_id
, set
, err
);
226 if (unlikely(ret
!= 0)) {
227 RTE_BOND_LOG(ERR
, "Operation rte_flow_isolate failed"
228 " for slave %d with error %d", i
, ret
);
229 internals
->flow_isolated_valid
= 0;
233 internals
->flow_isolated
= set
;
234 internals
->flow_isolated_valid
= 1;
238 const struct rte_flow_ops bond_flow_ops
= {
239 .validate
= bond_flow_validate
,
240 .create
= bond_flow_create
,
241 .destroy
= bond_flow_destroy
,
242 .flush
= bond_flow_flush
,
243 .query
= bond_flow_query
,
244 .isolate
= bond_flow_isolate
,