/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to the hardware Free Pool Allocator.
 */

#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__

#include "cvmx-address.h"
#include "cvmx-fpa-defs.h"

#define CVMX_FPA_NUM_POOLS	8
#define CVMX_FPA_MIN_BLOCK_SIZE	128
#define CVMX_FPA_ALIGNMENT	128

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	uint64_t u64;
	struct {
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
	} s;
} cvmx_fpa_iobdma_data_t;
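
/*
 * Illustrative sketch (not part of the original header): how the fields of
 * cvmx_fpa_iobdma_data_t combine into the single 64-bit IOBDMA command word.
 * This mirrors what cvmx_fpa_async_alloc() below does; the scratch offset of
 * 0 and the variable "pool" are placeholders for the example.
 *
 *	cvmx_fpa_iobdma_data_t cmd;
 *	cmd.u64 = 0;
 *	cmd.s.scraddr = 0;	scratchpad location, as a 64-bit word index
 *	cmd.s.len = 1;		one response word expected
 *	cmd.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
 *	cmd.s.addr = 0;		unused for FPA allocations
 *	cvmx_send_single(cmd.u64);
 */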

/**
 * Structure describing the current state of an FPA pool.
 */
typedef struct {
	/* Name it was created under */
	const char *name;
	/* Size of each block */
	uint64_t size;
	/* The base memory address of the whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;

/**
 * Current state of all the pools. Use the access functions
 * instead of using it directly.
 */
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];

/* CSR typedefs have been moved to cvmx-csr-*.h */

/**
 * Return the name of the pool
 *
 * @pool:   Pool to get the name of
 * Returns The name
 */
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].name;
}

/**
 * Return the base of the pool
 *
 * @pool:   Pool to get the base of
 * Returns The base
 */
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].base;
}

/**
 * Check if a pointer belongs to an FPA pool. Return non-zero
 * if the supplied pointer is inside the memory controlled by
 * an FPA pool.
 *
 * @pool:   Pool to check
 * @ptr:    Pointer to check
 * Returns Non-zero if pointer is in the pool. Zero if not
 */
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
	return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
		((char *)ptr <
		 ((char *)(cvmx_fpa_pool_info[pool].base)) +
		 cvmx_fpa_pool_info[pool].size *
		 cvmx_fpa_pool_info[pool].starting_element_count));
}
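
/*
 * Illustrative sketch (not part of the original header): using
 * cvmx_fpa_is_member() to guard a free. The pool number and pointer are
 * hypothetical.
 *
 *	if (cvmx_fpa_is_member(pool, ptr))
 *		cvmx_fpa_free(ptr, pool, 0);
 *	else
 *		cvmx_dprintf("Pointer %p is not from pool %s\n",
 *			     ptr, cvmx_fpa_get_name(pool));
 */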

/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;

	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	if (status.s.enb)
		cvmx_dprintf("Warning: Enabling FPA when FPA already enabled.\n");

	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;

		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}

		/* Enforce a 10 cycle delay between config and enable */
		cvmx_wait(10);
	}

	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}
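
/*
 * Illustrative sketch (not part of the original header): the expected
 * bring-up order. Pool configuration (cvmx_fpa_setup_pool() and any other
 * CSR writes) comes first; cvmx_fpa_enable() is called once afterwards,
 * before any allocations. The pool number, buffer and sizes are hypothetical.
 *
 *	if (cvmx_fpa_setup_pool(pool, "example pool", buffer,
 *				block_size, num_blocks) == 0)
 *		cvmx_fpa_enable();
 */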

/**
 * Get a new block from the FPA
 *
 * @pool:   Pool to get the block from
 * Returns Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
	uint64_t address =
	    cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
	if (address)
		return cvmx_phys_to_ptr(address);
	else
		return NULL;
}
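
/*
 * Illustrative sketch (not part of the original header): a typical
 * allocate/use/free cycle. The pool number is hypothetical; a failed
 * allocation returns NULL and must be checked.
 *
 *	void *block = cvmx_fpa_alloc(pool);
 *	if (block) {
 *		... fill or process the block ...
 *		cvmx_fpa_free(block, pool, 0);
 *	}
 */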

/**
 * Asynchronously get a new block from the FPA
 *
 * @scr_addr: Local scratch address to put the response in. This is a byte
 *	      address but must be 8-byte aligned.
 * @pool:     Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
	cvmx_fpa_iobdma_data_t data;

	/*
	 * Hardware only uses 64 bit aligned locations, so convert
	 * from byte address to 64-bit index
	 */
	data.s.scraddr = scr_addr >> 3;
	data.s.len = 1;
	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	data.s.addr = 0;
	cvmx_send_single(data.u64);
}
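
/*
 * Illustrative sketch (not part of the original header): pairing
 * cvmx_fpa_async_alloc() with a later scratchpad read. The CVMX_SYNCIOBDMA
 * barrier and cvmx_scratch_read64() helper are assumed to come from the
 * SDK's cvmx-asm.h/cvmx-scratch.h and are not defined in this header; the
 * scratch offset is hypothetical.
 *
 *	cvmx_fpa_async_alloc(scr_off, pool);
 *	... overlap other work while the FPA responds ...
 *	CVMX_SYNCIOBDMA;
 *	uint64_t phys = cvmx_scratch_read64(scr_off);
 *	void *block = phys ? cvmx_phys_to_ptr(phys) : NULL;
 */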

/**
 * Free a block allocated with an FPA pool. Does NOT provide memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:    Block to free
 * @pool:   Pool to put it in
 * @num_cache_lines:
 *	    Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
					uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;

	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	barrier();
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Free a block allocated with an FPA pool. Provides the required memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:    Block to free
 * @pool:   Pool to put it in
 * @num_cache_lines:
 *	    Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
				 uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;

	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/*
	 * Make sure that any previous writes to memory go out before
	 * we free this buffer. This also serves as a barrier to
	 * prevent GCC from reordering operations to after the
	 * free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
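
/*
 * Illustrative note (not part of the original header): choosing between the
 * two free variants. If the core wrote to the block, cvmx_fpa_free() is the
 * safe choice because of the CVMX_SYNCWS ordering; cvmx_fpa_free_nosync()
 * skips that ordering and so only suits blocks the core did not modify. The
 * pool number and pointer are hypothetical.
 *
 *	block was only read, never written, by this core:
 *		cvmx_fpa_free_nosync(block, pool, 0);
 *
 *	block was modified, so make the writes visible before the free:
 *		cvmx_fpa_free(block, pool, 0);
 */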

/**
 * Set up an FPA pool to control a new block of memory.
 * This can only be called once per pool. Make sure proper
 * locking enforces this.
 *
 * @pool:       Pool to initialize
 *		0 <= pool < 8
 * @name:       Constant character string to name this pool.
 *		The string is not copied.
 * @buffer:     Pointer to the block of memory to use. This must be
 *		accessible by all processors and external hardware.
 * @block_size: Size for each block controlled by the FPA
 * @num_blocks: Number of blocks
 *
 * Returns 0 on success,
 *	   -1 on failure
 */
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
			       uint64_t block_size, uint64_t num_blocks);
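
/*
 * Illustrative sketch (not part of the original header): setting up a small
 * pool from a statically allocated buffer. The pool number, symbol names and
 * counts are hypothetical; the buffer must honour CVMX_FPA_ALIGNMENT and the
 * block size must be at least CVMX_FPA_MIN_BLOCK_SIZE.
 *
 *	#define EXAMPLE_BLOCKS 32
 *	static uint8_t example_mem[EXAMPLE_BLOCKS * CVMX_FPA_MIN_BLOCK_SIZE]
 *		__attribute__((aligned(CVMX_FPA_ALIGNMENT)));
 *
 *	if (cvmx_fpa_setup_pool(pool, "example", example_mem,
 *				CVMX_FPA_MIN_BLOCK_SIZE, EXAMPLE_BLOCKS) != 0)
 *		cvmx_dprintf("FPA pool setup failed\n");
 */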

/**
 * Shut down a memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool.
 *
 * @pool:   Pool to shut down
 * Returns Zero on success
 *	   - Positive is a count of missing buffers
 *	   - Negative means too many buffers or corrupted pointers
 */
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
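
/*
 * Illustrative sketch (not part of the original header): interpreting the
 * shutdown result. Zero means every buffer came back; the sign of a non-zero
 * result distinguishes missing buffers from excess or corrupted ones. The
 * pool number is hypothetical.
 *
 *	int64_t result = (int64_t)cvmx_fpa_shutdown_pool(pool);
 *	if (result > 0)
 *		cvmx_dprintf("%lld buffers missing from pool %llu\n",
 *			     (long long)result, (unsigned long long)pool);
 *	else if (result < 0)
 *		cvmx_dprintf("Too many buffers or corrupted pointers\n");
 */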

/**
 * Get the size of blocks controlled by the pool
 * This is resolved to a constant at compile time.
 *
 * @pool:   Pool to access
 * Returns Size of the block in bytes
 */
uint64_t cvmx_fpa_get_block_size(uint64_t pool);

#endif /* __CVMX_FPA_H__ */