ceph/src/dpdk/drivers/net/enic/base/cq_desc.h
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_

/*
 * Completion queue descriptor types
 */
enum cq_desc_types {
        CQ_DESC_TYPE_WQ_ENET = 0,
        CQ_DESC_TYPE_DESC_COPY = 1,
        CQ_DESC_TYPE_WQ_EXCH = 2,
        CQ_DESC_TYPE_RQ_ENET = 3,
        CQ_DESC_TYPE_RQ_FCP = 4,
        CQ_DESC_TYPE_IOMMU_MISS = 5,
        CQ_DESC_TYPE_SGL = 6,
        CQ_DESC_TYPE_CLASSIFIER = 7,
        CQ_DESC_TYPE_TEST = 127,
};

/* Completion queue descriptor: 16B
 *
 * All completion queues have this basic layout. The
 * type_specfic area is unique for each completion
 * queue type.
 */
struct cq_desc {
        __le16 completed_index;
        __le16 q_number;
        u8 type_specfic[11];
        u8 type_color;
};

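/*
 * Field packing, as implied by the masks and shifts below: within
 * type_color, the descriptor type occupies the low CQ_DESC_TYPE_BITS
 * and the color bit sits at CQ_DESC_COLOR_SHIFT (the most significant
 * bit of the byte). q_number and completed_index use only the low
 * 10 and 12 bits of their 16-bit little-endian fields, respectively.
 */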
#define CQ_DESC_TYPE_BITS        4
#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK       1
#define CQ_DESC_COLOR_SHIFT      7
#define CQ_DESC_Q_NUM_BITS       10
#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS    12
#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)

static inline void cq_color_enc(struct cq_desc *desc, const u8 color)
{
        if (color)
                desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT);
        else
                desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT);
}

static inline void cq_desc_enc(struct cq_desc *desc,
        const u8 type, const u8 color, const u16 q_number,
        const u16 completed_index)
{
        desc->type_color = (type & CQ_DESC_TYPE_MASK) |
                ((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
        desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK);
        desc->completed_index = cpu_to_le16(completed_index &
                CQ_DESC_COMP_NDX_MASK);
}

static inline void cq_desc_dec(const struct cq_desc *desc_arg,
        u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
        const struct cq_desc *desc = desc_arg;
        const u8 type_color = desc->type_color;

        *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

        /*
         * Make sure color bit is read from desc *before* other fields
         * are read from desc. Hardware guarantees color bit is last
         * bit (byte) written. Adding the rmb() prevents the compiler
         * and/or CPU from reordering the reads which would potentially
         * result in reading stale values.
         */

        rmb();

        *type = type_color & CQ_DESC_TYPE_MASK;
        *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
        *completed_index = le16_to_cpu(desc->completed_index) &
                CQ_DESC_COMP_NDX_MASK;
}

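/*
 * Read only the color bit of a descriptor. Unlike cq_desc_dec(), this
 * does not issue a read barrier, so it is suited to a cheap "is there
 * anything new?" peek before deciding to decode the full descriptor.
 */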
static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color)
{
        volatile const struct cq_desc *desc = desc_arg;

        *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
}

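/*
 * Illustrative sketch only, not part of the original Cisco header: one
 * way a consumer could use cq_desc_dec() and the color protocol
 * described above to poll a completion ring. The bookkeeping
 * arguments (ring, ring_size, to_clean, last_color) are hypothetical;
 * real callers such as the enic vnic_cq code keep equivalent state in
 * their own queue structure. Returns 1 if a new completion was
 * decoded, 0 if the current entry has not been written yet.
 */
static inline int cq_desc_poll_one(struct cq_desc *ring,
        unsigned int ring_size, unsigned int *to_clean, u8 *last_color,
        u8 *type, u16 *q_number, u16 *completed_index)
{
        u8 color;
        struct cq_desc *desc = &ring[*to_clean];

        cq_desc_dec(desc, type, &color, q_number, completed_index);

        /* Same color as the previous pass over the ring: hardware has
         * not written this entry yet, so there is nothing to consume.
         */
        if (color == *last_color)
                return 0;

        /* Consume the entry. The color the driver treats as "old"
         * flips each time the ring wraps, matching the toggle the
         * hardware applies on every pass.
         */
        if (++(*to_clean) == ring_size) {
                *to_clean = 0;
                *last_color = *last_color ? 0 : 1;
        }

        return 1;
}
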
#endif /* _CQ_DESC_H_ */