/*
 * drivers/net/ethernet/mellanox/mlxsw/item.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H

#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>

struct mlxsw_item {
	unsigned short	offset;		/* bytes in container */
	unsigned short	step;		/* step in bytes for indexed items */
	unsigned short	in_step_offset; /* offset within one step */
	unsigned char	shift;		/* shift in bits */
	unsigned char	element_size;	/* size of element in bit array */
	bool		no_real_shift;
	union {
		unsigned char bits;
		unsigned short bytes;
	} size;
	const char	*name;
};
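
/* Each mlxsw_item statically describes one field of a register or command
 * mailbox payload: where the field sits in the big-endian buffer and how
 * wide it is.  The __mlxsw_item_*() helpers below use this descriptor to
 * read and write the field so that callers never repeat the offset/shift
 * arithmetic by hand.
 */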

static inline unsigned int
__mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
		    size_t typesize)
{
	BUG_ON(index && !item->step);
	if (item->offset % typesize != 0 ||
	    item->step % typesize != 0 ||
	    item->in_step_offset % typesize != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
		       item->name, item->offset, item->step,
		       item->in_step_offset, typesize);
		BUG();
	}

	return ((item->offset + item->step * index + item->in_step_offset) /
		typesize);
}

static inline u16 __mlxsw_item_get16(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 tmp;

	tmp = be16_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u16 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u16 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be16_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be16(tmp);
}

static inline u32 __mlxsw_item_get32(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 tmp;

	tmp = be32_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u32 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u32 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be32_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be32(tmp);
}

static inline u64 __mlxsw_item_get64(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 tmp;

	tmp = be64_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u64 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
	u64 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be64_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be64(tmp);
}

static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst,
					    const struct mlxsw_item *item,
					    unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(dst, &buf[offset], item->size.bytes);
}

static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
					  const struct mlxsw_item *item,
					  unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(&buf[offset], src, item->size.bytes);
}

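/* Bit-array items pack several small elements per byte.  As the index
 * arithmetic below works out, elements are numbered from the end of the
 * big-endian array: element 0 occupies the least significant bits of the
 * array's last byte.
 */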
static inline u16
__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
			      u16 index, u8 *shift)
{
	u16 max_index, be_index;
	u16 offset;		/* byte offset inside the array */
	u8 in_byte_index;

	BUG_ON(index && !item->element_size);
	if (item->offset % sizeof(u32) != 0 ||
	    BITS_PER_BYTE % item->element_size != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
		       item->name, item->offset, item->element_size);
		BUG();
	}

	max_index = (item->size.bytes << 3) / item->element_size - 1;
	be_index = max_index - index;
	offset = be_index * item->element_size >> 3;
	in_byte_index = index % (BITS_PER_BYTE / item->element_size);
	*shift = in_byte_index * item->element_size;

	return item->offset + offset;
}

static inline u8 __mlxsw_item_bit_array_get(const char *buf,
					    const struct mlxsw_item *item,
					    u16 index)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);

	tmp = buf[offset];
	tmp >>= shift;
	tmp &= GENMASK(item->element_size - 1, 0);
	return tmp;
}

static inline void __mlxsw_item_bit_array_set(char *buf,
					      const struct mlxsw_item *item,
					      u16 index, u8 val)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
	u8 mask = GENMASK(item->element_size - 1, 0) << shift;

	val <<= shift;
	val &= mask;
	tmp = buf[offset];
	tmp &= ~mask;
	tmp |= val;
	buf[offset] = tmp;
}

#define __ITEM_NAME(_type, _cname, _iname)					\
	mlxsw_##_type##_##_cname##_##_iname##_item

/* _type: cmd_mbox, reg, etc.
 * _cname: container name (e.g. command name, register name)
 * _iname: item name within the container
 */

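/* For example (a hypothetical item, shown for illustration only):
 *
 *	MLXSW_ITEM32(reg, foo, bar, 0x08, 4, 12);
 *
 * would describe a 12-bit field at bit offset 4 of the 32-bit word at byte
 * offset 0x08 of the hypothetical "foo" register, and would generate the
 * accessors mlxsw_reg_foo_bar_get(buf) and mlxsw_reg_foo_bar_set(buf, val).
 */
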
#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)	\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}

#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u16 val)				\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}

#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)	\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}

#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u32 val)				\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}

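/* For indexed items, instance n of the field is located at
 * _offset + _step * n + _instepoffset.  For example (a hypothetical item,
 * shown for illustration only):
 *
 *	MLXSW_ITEM32_INDEXED(reg, foo, bar, 0x10, 0, 8, 0x04, 0x00, false);
 *
 * would describe an 8-bit field repeated every 4 bytes starting at byte
 * offset 0x10, accessed via mlxsw_reg_foo_bar_get(buf, n) and
 * mlxsw_reg_foo_bar_set(buf, n, val).
 */
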
#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)	\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}

#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,		\
			     _sizebits, _step, _instepoffset, _norealshift)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u64 val)				\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}

#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst)	\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)	\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), 0);		\
}

#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,	\
			       _step, _instepoffset)				\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf,		\
						  unsigned short index,		\
						  char *dst)			\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), index);	\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,			\
						unsigned short index,		\
						const char *src)		\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), index);	\
}

#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
			     _element_size)					\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {		\
	.offset = _offset,							\
	.element_size = _element_size,						\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u8								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index)		\
{										\
	return __mlxsw_item_bit_array_get(buf,					\
					  &__ITEM_NAME(_type, _cname, _iname),	\
					  index);				\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)	\
{										\
	__mlxsw_item_bit_array_set(buf,						\
				   &__ITEM_NAME(_type, _cname, _iname),		\
				   index, val);					\
}

#endif