/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"

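/* Execute the CREATE_FLOW_TABLE firmware command and return the
 * device-assigned table id through @table_id on success.
 */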
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
			       enum fs_flow_table_type type, unsigned int level,
			       unsigned int log_size, unsigned int *table_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, type);
	MLX5_SET(create_flow_table_in, in, level, level);
	MLX5_SET(create_flow_table_in, in, log_size, log_size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					 sizeof(out));

	if (!err)
		*table_id = MLX5_GET(create_flow_table_out, out,
				     table_id);
	return err;
}

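/* Tear down a flow table in firmware via DESTROY_FLOW_TABLE. */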
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
				struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}

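/* Execute CREATE_FLOW_GROUP. The caller provides @in with the rest of
 * the inbox (e.g. the match criteria) already filled; only the opcode
 * and table coordinates are set here. On success the firmware-assigned
 * id is returned through @group_id.
 */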
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       u32 *in,
			       unsigned int *group_id)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);

	err = mlx5_cmd_exec_check_status(dev, in,
					 inlen, out,
					 sizeof(out));
	if (!err)
		*group_id = MLX5_GET(create_flow_group_out, out,
				     group_id);

	return err;
}

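/* Free a flow group via DESTROY_FLOW_GROUP. */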
int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
				struct mlx5_flow_table *ft,
				unsigned int group_id)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}

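/* Common helper for SET_FLOW_TABLE_ENTRY. The inbox is variable-length:
 * a fixed set_fte_in header followed by one dest_format_struct per
 * destination, hence the mlx5_vzalloc() allocation instead of a stack
 * buffer. @opmod and @modify_mask distinguish creating a new FTE from
 * modifying an existing one.
 */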
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	u32 out[MLX5_ST_SZ_DW(set_fte_out)];
	struct mlx5_flow_rule *dst;
	void *in_flow_context;
	void *in_match_value;
	void *in_dests;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
	MLX5_SET(flow_context, in_flow_context, action, fte->action);
	MLX5_SET(flow_context, in_flow_context, destination_list_size,
		 fte->dests_size);
	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	list_for_each_entry(dst, &fte->node.children, node.list) {
		unsigned int id;

		MLX5_SET(dest_format_struct, in_dests, destination_type,
			 dst->dest_attr.type);
		if (dst->dest_attr.type ==
		    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			id = dst->dest_attr.ft->id;
		else
			id = dst->dest_attr.tir_num;
		MLX5_SET(dest_format_struct, in_dests, destination_id, id);
		in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
	}
	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
					 sizeof(out));
	kvfree(in);

	return err;
}

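/* Create a new flow table entry: opmod 0 with an empty modify mask. */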
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

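/* Modify an existing FTE in place. Only valid when the device reports
 * the flow_modify_en capability; opmod 1 selects modification, and the
 * enable mask restricts the update to the destination list.
 */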
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	int opmod;
	int modify_mask;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -ENOTSUPP;
	opmod = 1;
	modify_mask = 1 <<
		MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}

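/* Remove the FTE at @index via DELETE_FLOW_TABLE_ENTRY. */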
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned int index)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, index);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));

	return err;
}