]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/ethernet/mellanox/mlxsw/item.h
Merge ath-next from ath.git
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / item.h
CommitLineData
93c1edb2
JP
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/item.h
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36#ifndef _MLXSW_ITEM_H
37#define _MLXSW_ITEM_H
38
39#include <linux/types.h>
40#include <linux/string.h>
41#include <linux/bitops.h>
42
/* Describes the location and layout of one item (field) within a
 * register / command-mailbox payload buffer.  Instances are generated
 * by the MLXSW_ITEM*() macros below and consumed by the
 * __mlxsw_item_*() accessor helpers.
 */
struct mlxsw_item {
	unsigned short offset;		/* bytes in container */
	unsigned short step;		/* step in bytes for indexed items */
	unsigned short in_step_offset;	/* offset within one step */
	unsigned char shift;		/* shift in bits */
	unsigned char element_size;	/* size of element in bit array */
	bool no_real_shift;		/* when set, get/set keep the value at
					 * its in-buffer bit position instead
					 * of shifting it down to bit 0
					 */
	union {
		unsigned char bits;	/* width for numeric items */
		unsigned short bytes;	/* length for buffer/bit-array items */
	} size;
	const char *name;		/* used only in error reports */
};
56
57static inline unsigned int
58__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
59 size_t typesize)
60{
61 BUG_ON(index && !item->step);
62 if (item->offset % typesize != 0 ||
63 item->step % typesize != 0 ||
64 item->in_step_offset % typesize != 0) {
ed8db18d 65 pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
93c1edb2
JP
66 item->name, item->offset, item->step,
67 item->in_step_offset, typesize);
68 BUG();
69 }
70
71 return ((item->offset + item->step * index + item->in_step_offset) /
72 typesize);
73}
74
75static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
76 unsigned short index)
77{
78 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
79 __be16 *b = (__be16 *) buf;
80 u16 tmp;
81
82 tmp = be16_to_cpu(b[offset]);
83 tmp >>= item->shift;
84 tmp &= GENMASK(item->size.bits - 1, 0);
85 if (item->no_real_shift)
86 tmp <<= item->shift;
87 return tmp;
88}
89
90static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
91 unsigned short index, u16 val)
92{
93 unsigned int offset = __mlxsw_item_offset(item, index,
94 sizeof(u16));
95 __be16 *b = (__be16 *) buf;
96 u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
97 u16 tmp;
98
99 if (!item->no_real_shift)
100 val <<= item->shift;
101 val &= mask;
102 tmp = be16_to_cpu(b[offset]);
103 tmp &= ~mask;
104 tmp |= val;
105 b[offset] = cpu_to_be16(tmp);
106}
107
108static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
109 unsigned short index)
110{
111 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
112 __be32 *b = (__be32 *) buf;
113 u32 tmp;
114
115 tmp = be32_to_cpu(b[offset]);
116 tmp >>= item->shift;
117 tmp &= GENMASK(item->size.bits - 1, 0);
118 if (item->no_real_shift)
119 tmp <<= item->shift;
120 return tmp;
121}
122
123static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
124 unsigned short index, u32 val)
125{
126 unsigned int offset = __mlxsw_item_offset(item, index,
127 sizeof(u32));
128 __be32 *b = (__be32 *) buf;
129 u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
130 u32 tmp;
131
132 if (!item->no_real_shift)
133 val <<= item->shift;
134 val &= mask;
135 tmp = be32_to_cpu(b[offset]);
136 tmp &= ~mask;
137 tmp |= val;
138 b[offset] = cpu_to_be32(tmp);
139}
140
141static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
142 unsigned short index)
143{
144 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
145 __be64 *b = (__be64 *) buf;
146 u64 tmp;
147
148 tmp = be64_to_cpu(b[offset]);
149 tmp >>= item->shift;
150 tmp &= GENMASK_ULL(item->size.bits - 1, 0);
151 if (item->no_real_shift)
152 tmp <<= item->shift;
153 return tmp;
154}
155
156static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
157 unsigned short index, u64 val)
158{
159 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
160 __be64 *b = (__be64 *) buf;
161 u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
162 u64 tmp;
163
164 if (!item->no_real_shift)
165 val <<= item->shift;
166 val &= mask;
167 tmp = be64_to_cpu(b[offset]);
168 tmp &= ~mask;
169 tmp |= val;
170 b[offset] = cpu_to_be64(tmp);
171}
172
173static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
174 struct mlxsw_item *item)
175{
176 memcpy(dst, &buf[item->offset], item->size.bytes);
177}
178
179static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
180 struct mlxsw_item *item)
181{
182 memcpy(&buf[item->offset], src, item->size.bytes);
183}
184
185static inline u16
186__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
187{
188 u16 max_index, be_index;
189 u16 offset; /* byte offset inside the array */
190
191 BUG_ON(index && !item->element_size);
192 if (item->offset % sizeof(u32) != 0 ||
193 BITS_PER_BYTE % item->element_size != 0) {
194 pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
195 item->name, item->offset, item->element_size);
196 BUG();
197 }
198
199 max_index = (item->size.bytes << 3) / item->element_size - 1;
200 be_index = max_index - index;
201 offset = be_index * item->element_size >> 3;
202 *shift = index % (BITS_PER_BYTE / item->element_size) << 1;
203
204 return item->offset + offset;
205}
206
207static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
208 u16 index)
209{
210 u8 shift, tmp;
211 u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
212
213 tmp = buf[offset];
214 tmp >>= shift;
215 tmp &= GENMASK(item->element_size - 1, 0);
216 return tmp;
217}
218
219static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
220 u16 index, u8 val)
221{
222 u8 shift, tmp;
223 u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
224 u8 mask = GENMASK(item->element_size - 1, 0) << shift;
225
226 val <<= shift;
227 val &= mask;
228 tmp = buf[offset];
229 tmp &= ~mask;
230 tmp |= val;
231 buf[offset] = tmp;
232}
233
/* Build the identifier of the static struct mlxsw_item instance that
 * the MLXSW_ITEM*() macros define for a given type/container/item.
 */
#define __ITEM_NAME(_type, _cname, _iname)				\
	mlxsw_##_type##_##_cname##_##_iname##_item
236
/* _type: cmd_mbox, reg, etc.
 * _cname: container name (e.g. command name, register name)
 * _iname: item name within the container
 */
241
/* Define a non-indexed 16-bit item plus its typed accessors
 * mlxsw_<type>_<cname>_<iname>_get()/_set().
 */
#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.shift = _shift,						\
	.size = {.bits = _sizebits,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)	\
{									\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}									\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
{									\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
257
/* Define an indexed 16-bit item (one instance every _step bytes, at
 * _instepoffset within each step) plus its typed accessors taking an
 * index argument.
 */
#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.step = _step,							\
	.in_step_offset = _instepoffset,				\
	.shift = _shift,						\
	.no_real_shift = _norealshift,					\
	.size = {.bits = _sizebits,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline u16							\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
{									\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);				\
}									\
static inline void							\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u16 val)			\
{									\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
			   index, val);					\
}
282
/* Define a non-indexed 32-bit item plus its typed accessors
 * mlxsw_<type>_<cname>_<iname>_get()/_set().
 */
#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.shift = _shift,						\
	.size = {.bits = _sizebits,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)	\
{									\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}									\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
{									\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
298
/* Define an indexed 32-bit item (one instance every _step bytes, at
 * _instepoffset within each step) plus its typed accessors taking an
 * index argument.
 */
#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.step = _step,							\
	.in_step_offset = _instepoffset,				\
	.shift = _shift,						\
	.no_real_shift = _norealshift,					\
	.size = {.bits = _sizebits,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline u32							\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
{									\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);				\
}									\
static inline void							\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u32 val)			\
{									\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
			   index, val);					\
}
323
/* Define a non-indexed 64-bit item plus its typed accessors
 * mlxsw_<type>_<cname>_<iname>_get()/_set().
 */
#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.shift = _shift,						\
	.size = {.bits = _sizebits,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)	\
{									\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}									\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
{									\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
339
/* Define an indexed 64-bit item (one instance every _step bytes, at
 * _instepoffset within each step) plus its typed accessors taking an
 * index argument.
 */
#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,	\
			     _sizebits, _step, _instepoffset, _norealshift)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.step = _step,							\
	.in_step_offset = _instepoffset,				\
	.shift = _shift,						\
	.no_real_shift = _norealshift,					\
	.size = {.bits = _sizebits,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline u64							\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
{									\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);				\
}									\
static inline void							\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u64 val)			\
{									\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
			   index, val);					\
}
364
/* Define a raw buffer item of _sizebytes plus typed memcpy accessors
 * mlxsw_<type>_<cname>_<iname>_memcpy_from()/_memcpy_to().
 */
#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.size = {.bytes = _sizebytes,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline void							\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)	\
{									\
	__mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
}									\
static inline void							\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)	\
{									\
	__mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));	\
}
381
/* Define a packed bit-array item of _sizebytes with _element_size-bit
 * elements, plus indexed typed accessors.
 *
 * Fixes: the generated void _set() function used "return <void expr>;"
 * (a constraint violation in strict C), and a stray trailing backslash
 * after the closing brace extended the macro onto the following line.
 */
#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
			     _element_size)				\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,						\
	.element_size = _element_size,					\
	.size = {.bytes = _sizebytes,},					\
	.name = #_type "_" #_cname "_" #_iname,				\
};									\
static inline u8							\
mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index)		\
{									\
	return __mlxsw_item_bit_array_get(buf,				\
					  &__ITEM_NAME(_type, _cname, _iname),	\
					  index);			\
}									\
static inline void							\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)	\
{									\
	__mlxsw_item_bit_array_set(buf,					\
				   &__ITEM_NAME(_type, _cname, _iname),	\
				   index, val);				\
}

405#endif