]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /* |
2 | * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. | |
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | |
4 | * | |
5 | * Copyright (c) 2014, Cisco Systems, Inc. | |
6 | * All rights reserved. | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | |
11 | * | |
12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | |
14 | * | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in | |
17 | * the documentation and/or other materials provided with the | |
18 | * distribution. | |
19 | * | |
20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
23 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
24 | * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
25 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
26 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
27 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |
28 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
29 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |
30 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
31 | * POSSIBILITY OF SUCH DAMAGE. | |
32 | * | |
33 | */ | |
34 | ||
35 | #ifndef _VNIC_WQ_H_ | |
36 | #define _VNIC_WQ_H_ | |
37 | ||
38 | ||
39 | #include "vnic_dev.h" | |
40 | #include "vnic_cq.h" | |
41 | #include <rte_memzone.h> | |
42 | ||
/* Work queue control — register block, laid out to match the device
 * (see the offset comments): each 32-bit register occupies an 8-byte
 * stride, with padN filling the unused upper word.  Accessed through
 * the __iomem pointer in struct vnic_wq; do not reorder or resize.
 */
struct vnic_wq_ctrl {
	u64 ring_base;			/* 0x00: base address of descriptor ring */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10: index last posted by software */
	u32 pad1;
	u32 cq_index;			/* 0x18: associated completion queue */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30: index hardware will fetch next */
	u32 pad5;
	u32 dca_value;			/* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable;	/* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset;	/* 0x48 */
	u32 pad8;
	u32 error_status;		/* 0x50 */
	u32 pad9;
};
67 | ||
/* Per-descriptor transmit buffer bookkeeping (16 bytes). */
struct vnic_wq_buf {
	struct rte_mempool *pool;	/* presumably the mempool owning *mb — confirm in vnic_wq.c */
	void *mb;			/* packet buffer (likely struct rte_mbuf * — TODO confirm) */
};
73 | ||
/* Software state for one work (transmit) queue. */
struct vnic_wq {
	unsigned int index;		/* queue index within the vNIC */
	struct vnic_dev *vdev;		/* owning device */
	struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
	struct vnic_dev_ring ring;	/* descriptor ring (count/avail tracked here) */
	struct vnic_wq_buf *bufs;	/* one entry per descriptor */
	unsigned int head_idx;		/* presumably next slot software fills — confirm in datapath */
	unsigned int tail_idx;		/* presumably oldest un-reclaimed slot — confirm in datapath */
	unsigned int socket_id;		/* NUMA socket used for allocations */
	const struct rte_memzone *cqmsg_rz; /* memzone for CQ message area */
	uint16_t last_completed_index;
};
86 | ||
87 | static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) | |
88 | { | |
89 | /* how many does SW own? */ | |
90 | return wq->ring.desc_avail; | |
91 | } | |
92 | ||
93 | static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) | |
94 | { | |
95 | /* how many does HW own? */ | |
96 | return wq->ring.desc_count - wq->ring.desc_avail - 1; | |
97 | } | |
98 | ||
/* Field layout of the 64-bit cached-posted-index value built by
 * vnic_cached_posted_index():
 *   bits [11:0]   posted index
 *   bits [20:16]  prefetch length, in 32-byte cache lines (0 = none)
 *   bits [63:21]  prefetch address >> 5 (43 bits)
 */
#define PI_LOG2_CACHE_LINE_SIZE	5
#define PI_INDEX_BITS		12
#define PI_INDEX_MASK		((1U << PI_INDEX_BITS) - 1)
#define PI_PREFETCH_LEN_MASK	((1U << PI_LOG2_CACHE_LINE_SIZE) - 1)
#define PI_PREFETCH_LEN_OFF	16
#define PI_PREFETCH_ADDR_BITS	43
#define PI_PREFETCH_ADDR_MASK	((1ULL << PI_PREFETCH_ADDR_BITS) - 1)
#define PI_PREFETCH_ADDR_OFF	21
107 | ||
108 | /** How many cache lines are touched by buffer (addr, len). */ | |
109 | static inline unsigned int num_cache_lines_touched(dma_addr_t addr, | |
110 | unsigned int len) | |
111 | { | |
112 | const unsigned long mask = PI_PREFETCH_LEN_MASK; | |
113 | const unsigned long laddr = (unsigned long)addr; | |
114 | unsigned long lines, equiv_len; | |
115 | /* A. If addr is aligned, our solution is just to round up len to the | |
116 | next boundary. | |
117 | ||
118 | e.g. addr = 0, len = 48 | |
119 | +--------------------+ | |
120 | |XXXXXXXXXXXXXXXXXXXX| 32-byte cacheline a | |
121 | +--------------------+ | |
122 | |XXXXXXXXXX | cacheline b | |
123 | +--------------------+ | |
124 | ||
125 | B. If addr is not aligned, however, we may use an extra | |
126 | cacheline. e.g. addr = 12, len = 22 | |
127 | ||
128 | +--------------------+ | |
129 | | XXXXXXXXXXXXX| | |
130 | +--------------------+ | |
131 | |XX | | |
132 | +--------------------+ | |
133 | ||
134 | Our solution is to make the problem equivalent to case A | |
135 | above by adding the empty space in the first cacheline to the length: | |
136 | unsigned long len; | |
137 | ||
138 | +--------------------+ | |
139 | |eeeeeeeXXXXXXXXXXXXX| "e" is empty space, which we add to len | |
140 | +--------------------+ | |
141 | |XX | | |
142 | +--------------------+ | |
143 | ||
144 | */ | |
145 | equiv_len = len + (laddr & mask); | |
146 | ||
147 | /* Now we can just round up this len to the next 32-byte boundary. */ | |
148 | lines = (equiv_len + mask) & (~mask); | |
149 | ||
150 | /* Scale bytes -> cachelines. */ | |
151 | return lines >> PI_LOG2_CACHE_LINE_SIZE; | |
152 | } | |
153 | ||
/**
 * Build the 64-bit cached posted index value for a send: the work queue
 * index in bits [11:0], the number of cache lines the buffer touches in
 * bits [20:16], and the buffer's cache-line address in bits [63:21]
 * (see the PI_* macros).  A prefetch length of 0 is written when the
 * buffer spans more lines than the 5-bit field can encode — presumably
 * this disables the hardware prefetch hint; confirm against the device
 * spec.
 */
static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
					   unsigned int index)
{
	unsigned int num_cache_lines = num_cache_lines_touched(addr, len);
	/* Wish we could avoid a branch here.  We could have separate
	 * vnic_wq_post() and vnic_wq_post_inline(), the latter
	 * only supporting < 1k (2^5 * 2^5) sends, I suppose.  This would
	 * eliminate the if (eop) branch as well.
	 */
	if (num_cache_lines > PI_PREFETCH_LEN_MASK)
		num_cache_lines = 0;
	return (index & PI_INDEX_MASK) |
	       ((num_cache_lines & PI_PREFETCH_LEN_MASK) << PI_PREFETCH_LEN_OFF) |
	       (((addr >> PI_LOG2_CACHE_LINE_SIZE) &
		 PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
}
170 | ||
/** Advance a ring index by one, wrapping back to 0 at n_descriptors. */
static inline uint32_t
buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
{
	uint32_t next = idx + 1;

	/* Wrap is the rare case on a ring of any useful size. */
	if (unlikely(next == n_descriptors))
		next = 0;
	return next;
}
179 | ||
/* Queue lifecycle and control — presumably implemented in vnic_wq.c. */
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
/* buf_clean is called for each outstanding buffer as the queue drains. */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq_buf *buf));
#endif /* _VNIC_WQ_H_ */