/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>

#include "rte_eth_bond_private.h"

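/*
 * Allocate a flow container on the given NUMA node and copy the rule
 * (attributes, pattern items and actions) into it using rte_flow_conv().
 */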
static struct rte_flow *
bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr,
                const struct rte_flow_item *items,
                const struct rte_flow_action *actions)
{
        struct rte_flow *flow;
        const struct rte_flow_conv_rule rule = {
                .attr_ro = attr,
                .pattern_ro = items,
                .actions_ro = actions,
        };
        struct rte_flow_error error;
        int ret;

        ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "Unable to process flow rule (%s): %s",
                             error.message ? error.message : "unspecified",
                             strerror(rte_errno));
                return NULL;
        }
        flow = rte_zmalloc_socket(NULL, offsetof(struct rte_flow, rule) + ret,
                                  RTE_CACHE_LINE_SIZE, numa_node);
        if (unlikely(flow == NULL)) {
                RTE_BOND_LOG(ERR, "Could not allocate new flow");
                return NULL;
        }
        ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
                            &error);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "Failed to copy flow rule (%s): %s",
                             error.message ? error.message : "unspecified",
                             strerror(rte_errno));
                rte_free(flow);
                return NULL;
        }
        return flow;
}

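/* Free a flow container and clear the caller's pointer. */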
static void
bond_flow_release(struct rte_flow **flow)
{
        rte_free(*flow);
        *flow = NULL;
}

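/* Validate the flow rule against every slave port; fail on the first error. */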
static int
bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                   const struct rte_flow_item patterns[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *err)
{
        struct bond_dev_private *internals = dev->data->dev_private;
        int i;
        int ret;

        for (i = 0; i < internals->slave_count; i++) {
                ret = rte_flow_validate(internals->slaves[i].port_id, attr,
                                        patterns, actions, err);
                if (ret) {
                        RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed"
                                     " for slave %d with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

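/*
 * Create the flow rule on every slave port. On success the flow is appended
 * to the bond device's flow list; on failure all slave flows created so far
 * are destroyed and the container is released.
 */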
static struct rte_flow *
bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item patterns[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *err)
{
        struct bond_dev_private *internals = dev->data->dev_private;
        struct rte_flow *flow;
        int i;

        flow = bond_flow_alloc(dev->data->numa_node, attr, patterns, actions);
        if (unlikely(flow == NULL)) {
                rte_flow_error_set(err, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, rte_strerror(ENOMEM));
                return NULL;
        }
        for (i = 0; i < internals->slave_count; i++) {
                flow->flows[i] = rte_flow_create(internals->slaves[i].port_id,
                                                 attr, patterns, actions, err);
                if (unlikely(flow->flows[i] == NULL)) {
                        RTE_BOND_LOG(ERR, "Failed to create flow on slave %d",
                                     i);
                        goto err;
                }
        }
        TAILQ_INSERT_TAIL(&internals->flow_list, flow, next);
        return flow;
err:
        /* Destroy all slave flows. */
        for (i = 0; i < internals->slave_count; i++) {
                if (flow->flows[i] != NULL)
                        rte_flow_destroy(internals->slaves[i].port_id,
                                         flow->flows[i], err);
        }
        bond_flow_release(&flow);
        return NULL;
}

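/*
 * Destroy the flow on every slave port, then unlink it from the bond flow
 * list and release the container. The last slave error, if any, is returned.
 */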
static int
bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *err)
{
        struct bond_dev_private *internals = dev->data->dev_private;
        int i;
        int ret = 0;

        for (i = 0; i < internals->slave_count; i++) {
                int lret;

                if (unlikely(flow->flows[i] == NULL))
                        continue;
                lret = rte_flow_destroy(internals->slaves[i].port_id,
                                        flow->flows[i], err);
                if (unlikely(lret != 0)) {
                        RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:"
                                     " %d", i, lret);
                        ret = lret;
                }
        }
        TAILQ_REMOVE(&internals->flow_list, flow, next);
        bond_flow_release(&flow);
        return ret;
}

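/* Remove every flow owned by the bond device from all of its slaves. */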
static int
bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err)
{
        struct bond_dev_private *internals = dev->data->dev_private;
        struct rte_flow *flow;
        void *tmp;
        int ret = 0;
        int lret;

        /* Destroy the bond flows on the slaves one by one instead of
         * flushing the slaves, so that the LACP flow and any other
         * externally created flows are preserved.
         */
        TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) {
                lret = bond_flow_destroy(dev, flow, err);
                if (unlikely(lret != 0))
                        ret = lret;
        }
        if (unlikely(ret != 0))
                RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves");
        return ret;
}

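/*
 * Query the COUNT action on every slave and return the aggregated hit and
 * byte counters.
 */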
static int
bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
                      const struct rte_flow_action *action,
                      struct rte_flow_query_count *count,
                      struct rte_flow_error *err)
{
        struct bond_dev_private *internals = dev->data->dev_private;
        struct rte_flow_query_count slave_count;
        int i;
        int ret;

        count->bytes = 0;
        count->hits = 0;
        rte_memcpy(&slave_count, count, sizeof(slave_count));
        for (i = 0; i < internals->slave_count; i++) {
                ret = rte_flow_query(internals->slaves[i].port_id,
                                     flow->flows[i], action,
                                     &slave_count, err);
                if (unlikely(ret != 0)) {
                        RTE_BOND_LOG(ERR, "Failed to query flow on"
                                     " slave %d: %d", i, ret);
                        return ret;
                }
                count->bytes += slave_count.bytes;
                count->hits += slave_count.hits;
                slave_count.bytes = 0;
                slave_count.hits = 0;
        }
        return 0;
}

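/* Dispatch a flow query; only the COUNT action is supported. */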
static int
bond_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                const struct rte_flow_action *action, void *arg,
                struct rte_flow_error *err)
{
        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_COUNT:
                return bond_flow_query_count(dev, flow, action, arg, err);
        default:
                return rte_flow_error_set(err, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, arg,
                                          rte_strerror(ENOTSUP));
        }
}

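/*
 * Apply flow isolated mode to every slave and record the setting in the
 * bond device private data; the valid flag is cleared if any slave fails.
 */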
static int
bond_flow_isolate(struct rte_eth_dev *dev, int set,
                  struct rte_flow_error *err)
{
        struct bond_dev_private *internals = dev->data->dev_private;
        int i;
        int ret;

        for (i = 0; i < internals->slave_count; i++) {
                ret = rte_flow_isolate(internals->slaves[i].port_id, set, err);
                if (unlikely(ret != 0)) {
                        RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed"
                                     " for slave %d with error %d", i, ret);
                        internals->flow_isolated_valid = 0;
                        return ret;
                }
        }
        internals->flow_isolated = set;
        internals->flow_isolated_valid = 1;
        return 0;
}

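/* rte_flow operations exported by the bonding PMD. */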
const struct rte_flow_ops bond_flow_ops = {
        .validate = bond_flow_validate,
        .create = bond_flow_create,
        .destroy = bond_flow_destroy,
        .flush = bond_flow_flush,
        .query = bond_flow_query,
        .isolate = bond_flow_isolate,
};