/* Source: drivers/net/ethernet/mellanox/mlxsw/item.h
 * (mirror_ubuntu-artful-kernel.git, blob 09f35de0a371097bc6b6305a75a972706c7a5e96)
 */
1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/item.h
3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 #ifndef _MLXSW_ITEM_H
37 #define _MLXSW_ITEM_H
38
39 #include <linux/types.h>
40 #include <linux/string.h>
41 #include <linux/bitops.h>
42
/* Describes one item (field) inside a register/command container buffer.
 * A plain item lives at 'offset' bytes, shifted 'shift' bits within its
 * word.  Indexed items repeat every 'step' bytes, each instance placed at
 * 'in_step_offset' inside its step.  Bit-array items pack 'element_size'-bit
 * elements into 'size.bytes' bytes.
 */
struct mlxsw_item {
	unsigned short offset;		/* bytes in container */
	unsigned short step;		/* step in bytes for indexed items */
	unsigned short in_step_offset; /* offset within one step */
	unsigned char shift;		/* shift in bits */
	unsigned char element_size;	/* size of element in bit array */
	bool no_real_shift;		/* value is kept at its shifted bit position */
	union {
		unsigned char bits;	/* field width for scalar items */
		unsigned short bytes;	/* buffer/array length for buf and bit-array items */
	} size;
	const char *name;		/* used in error messages only */
};
56
57 static inline unsigned int
58 __mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
59 size_t typesize)
60 {
61 BUG_ON(index && !item->step);
62 if (item->offset % typesize != 0 ||
63 item->step % typesize != 0 ||
64 item->in_step_offset % typesize != 0) {
65 pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
66 item->name, item->offset, item->step,
67 item->in_step_offset, typesize);
68 BUG();
69 }
70
71 return ((item->offset + item->step * index + item->in_step_offset) /
72 typesize);
73 }
74
75 static inline u8 __mlxsw_item_get8(const char *buf,
76 const struct mlxsw_item *item,
77 unsigned short index)
78 {
79 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
80 u8 *b = (u8 *) buf;
81 u8 tmp;
82
83 tmp = b[offset];
84 tmp >>= item->shift;
85 tmp &= GENMASK(item->size.bits - 1, 0);
86 if (item->no_real_shift)
87 tmp <<= item->shift;
88 return tmp;
89 }
90
91 static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item,
92 unsigned short index, u8 val)
93 {
94 unsigned int offset = __mlxsw_item_offset(item, index,
95 sizeof(u8));
96 u8 *b = (u8 *) buf;
97 u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
98 u8 tmp;
99
100 if (!item->no_real_shift)
101 val <<= item->shift;
102 val &= mask;
103 tmp = b[offset];
104 tmp &= ~mask;
105 tmp |= val;
106 b[offset] = tmp;
107 }
108
109 static inline u16 __mlxsw_item_get16(const char *buf,
110 const struct mlxsw_item *item,
111 unsigned short index)
112 {
113 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
114 __be16 *b = (__be16 *) buf;
115 u16 tmp;
116
117 tmp = be16_to_cpu(b[offset]);
118 tmp >>= item->shift;
119 tmp &= GENMASK(item->size.bits - 1, 0);
120 if (item->no_real_shift)
121 tmp <<= item->shift;
122 return tmp;
123 }
124
125 static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item,
126 unsigned short index, u16 val)
127 {
128 unsigned int offset = __mlxsw_item_offset(item, index,
129 sizeof(u16));
130 __be16 *b = (__be16 *) buf;
131 u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
132 u16 tmp;
133
134 if (!item->no_real_shift)
135 val <<= item->shift;
136 val &= mask;
137 tmp = be16_to_cpu(b[offset]);
138 tmp &= ~mask;
139 tmp |= val;
140 b[offset] = cpu_to_be16(tmp);
141 }
142
143 static inline u32 __mlxsw_item_get32(const char *buf,
144 const struct mlxsw_item *item,
145 unsigned short index)
146 {
147 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
148 __be32 *b = (__be32 *) buf;
149 u32 tmp;
150
151 tmp = be32_to_cpu(b[offset]);
152 tmp >>= item->shift;
153 tmp &= GENMASK(item->size.bits - 1, 0);
154 if (item->no_real_shift)
155 tmp <<= item->shift;
156 return tmp;
157 }
158
159 static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item,
160 unsigned short index, u32 val)
161 {
162 unsigned int offset = __mlxsw_item_offset(item, index,
163 sizeof(u32));
164 __be32 *b = (__be32 *) buf;
165 u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
166 u32 tmp;
167
168 if (!item->no_real_shift)
169 val <<= item->shift;
170 val &= mask;
171 tmp = be32_to_cpu(b[offset]);
172 tmp &= ~mask;
173 tmp |= val;
174 b[offset] = cpu_to_be32(tmp);
175 }
176
177 static inline u64 __mlxsw_item_get64(const char *buf,
178 const struct mlxsw_item *item,
179 unsigned short index)
180 {
181 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
182 __be64 *b = (__be64 *) buf;
183 u64 tmp;
184
185 tmp = be64_to_cpu(b[offset]);
186 tmp >>= item->shift;
187 tmp &= GENMASK_ULL(item->size.bits - 1, 0);
188 if (item->no_real_shift)
189 tmp <<= item->shift;
190 return tmp;
191 }
192
193 static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item,
194 unsigned short index, u64 val)
195 {
196 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
197 __be64 *b = (__be64 *) buf;
198 u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
199 u64 tmp;
200
201 if (!item->no_real_shift)
202 val <<= item->shift;
203 val &= mask;
204 tmp = be64_to_cpu(b[offset]);
205 tmp &= ~mask;
206 tmp |= val;
207 b[offset] = cpu_to_be64(tmp);
208 }
209
210 static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst,
211 const struct mlxsw_item *item,
212 unsigned short index)
213 {
214 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
215
216 memcpy(dst, &buf[offset], item->size.bytes);
217 }
218
219 static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
220 const struct mlxsw_item *item,
221 unsigned short index)
222 {
223 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
224
225 memcpy(&buf[offset], src, item->size.bytes);
226 }
227
/* Locate element 'index' of a bit-array item: returns the byte offset
 * within the container and stores the in-byte bit shift in *shift.
 * Elements are laid out most-significant-first (big-endian element order),
 * hence the index reversal via max_index below.
 */
static inline u16
__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
			      u16 index, u8 *shift)
{
	u16 max_index, be_index;
	u16 offset; /* byte offset inside the array */
	u8 in_byte_index;

	/* element_size must be set for any indexed access. */
	BUG_ON(index && !item->element_size);
	/* Array must start on a 32-bit boundary and element_size must
	 * evenly divide a byte (1, 2, 4 or 8 bits).
	 */
	if (item->offset % sizeof(u32) != 0 ||
	    BITS_PER_BYTE % item->element_size != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
		       item->name, item->offset, item->element_size);
		BUG();
	}

	/* Highest valid element index: total bits / bits per element - 1. */
	max_index = (item->size.bytes << 3) / item->element_size - 1;
	be_index = max_index - index;
	offset = be_index * item->element_size >> 3;
	in_byte_index = index % (BITS_PER_BYTE / item->element_size);
	*shift = in_byte_index * item->element_size;

	return item->offset + offset;
}
252
253 static inline u8 __mlxsw_item_bit_array_get(const char *buf,
254 const struct mlxsw_item *item,
255 u16 index)
256 {
257 u8 shift, tmp;
258 u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
259
260 tmp = buf[offset];
261 tmp >>= shift;
262 tmp &= GENMASK(item->element_size - 1, 0);
263 return tmp;
264 }
265
266 static inline void __mlxsw_item_bit_array_set(char *buf,
267 const struct mlxsw_item *item,
268 u16 index, u8 val)
269 {
270 u8 shift, tmp;
271 u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
272 u8 mask = GENMASK(item->element_size - 1, 0) << shift;
273
274 val <<= shift;
275 val &= mask;
276 tmp = buf[offset];
277 tmp &= ~mask;
278 tmp |= val;
279 buf[offset] = tmp;
280 }
281
/* Build the identifier of the static struct mlxsw_item instance the
 * MLXSW_ITEM* macros below define and reference.
 */
#define __ITEM_NAME(_type, _cname, _iname)					\
	mlxsw_##_type##_##_cname##_##_iname##_item
284
285 /* _type: cmd_mbox, reg, etc.
286 * _cname: containter name (e.g. command name, register name)
287 * _iname: item name within the container
288 */
289
/* MLXSW_ITEM8 - define get/set accessors for a single 8-bit field at a
 * fixed byte offset and bit shift inside a container buffer.
 */
#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)	\
{										\
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
{										\
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
305
/* MLXSW_ITEM8_INDEXED - define get/set accessors for an 8-bit field that
 * repeats every _step bytes, at _instepoffset within each step.
 */
#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			    _step, _instepoffset, _norealshift)			\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u8								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				 index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u8 val)				\
{										\
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			  index, val);						\
}
330
/* MLXSW_ITEM16 - define get/set accessors for a single 16-bit big-endian
 * field at a fixed byte offset and bit shift inside a container buffer.
 */
#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)	\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
346
/* MLXSW_ITEM16_INDEXED - define get/set accessors for a 16-bit big-endian
 * field that repeats every _step bytes, at _instepoffset within each step.
 */
#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u16 val)				\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}
371
/* MLXSW_ITEM32 - define get/set accessors for a single 32-bit big-endian
 * field at a fixed byte offset and bit shift inside a container buffer.
 */
#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)	\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
387
/* MLXSW_ITEM32_INDEXED - define get/set accessors for a 32-bit big-endian
 * field that repeats every _step bytes, at _instepoffset within each step.
 */
#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u32 val)				\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}
412
/* MLXSW_ITEM64 - define get/set accessors for a single 64-bit big-endian
 * field at a fixed byte offset and bit shift inside a container buffer.
 */
#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)	\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
428
/* MLXSW_ITEM64_INDEXED - define get/set accessors for a 64-bit big-endian
 * field that repeats every _step bytes, at _instepoffset within each step.
 */
#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,		\
			     _sizebits, _step, _instepoffset, _norealshift)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u64 val)				\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}
453
/* MLXSW_ITEM_BUF - define memcpy_from/memcpy_to accessors for a raw
 * _sizebytes-byte buffer embedded at a fixed offset in the container.
 */
#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst)	\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)	\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), 0);		\
}
472
/* MLXSW_ITEM_BUF_INDEXED - define memcpy_from/memcpy_to accessors for a
 * raw buffer that repeats every _step bytes, at _instepoffset within each
 * step.
 */
#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,	\
			       _step, _instepoffset)				\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf,		\
						  unsigned short index,		\
						  char *dst)			\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), index);	\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,			\
						unsigned short index,		\
						const char *src)		\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), index);	\
}
498
/* MLXSW_ITEM_BIT_ARRAY - define get/set accessors for an array of
 * _element_size-bit elements packed into _sizebytes bytes; elements are
 * stored in big-endian element order.
 *
 * Fixes over the previous version: the generated setter returned the value
 * of a void expression ("return __mlxsw_item_bit_array_set(...)"), which is
 * a constraint violation in standard C (C11 6.8.6.4); also dropped a stray
 * trailing line-continuation backslash after the closing brace that made
 * the macro swallow the following source line.
 */
#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
			     _element_size)					\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.element_size = _element_size,						\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u8								\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index)		\
{										\
	return __mlxsw_item_bit_array_get(buf,					\
					  &__ITEM_NAME(_type, _cname, _iname),	\
					  index);				\
}										\
static inline void								\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)		\
{										\
	__mlxsw_item_bit_array_set(buf,						\
				   &__ITEM_NAME(_type, _cname, _iname),		\
				   index, val);					\
}
522 #endif