/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to the hardware Free Pool Allocator.
 *
 *
 */

#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__

#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>

#define CVMX_FPA_NUM_POOLS	8
#define CVMX_FPA_MIN_BLOCK_SIZE	128
#define CVMX_FPA_ALIGNMENT	128

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
#else
		uint64_t addr:40;
		uint64_t did:8;
		uint64_t len:8;
		uint64_t scraddr:8;
#endif
	} s;
} cvmx_fpa_iobdma_data_t;

/**
 * Structure describing the current state of an FPA pool.
 */
typedef struct {
	/* Name it was created under */
	const char *name;
	/* Size of each block */
	uint64_t size;
	/* The base memory address of whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;

/**
 * Current state of all the pools. Use access functions
 * instead of using it directly.
 */
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];

/* CSR typedefs have been moved to cvmx-csr-*.h */

/**
 * Return the name of the pool
 *
 * @pool: Pool to get the name of
 * Returns The name
 */
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].name;
}

/**
 * Return the base of the pool
 *
 * @pool: Pool to get the base of
 * Returns The base
 */
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
	return cvmx_fpa_pool_info[pool].base;
}

/**
 * Check if a pointer belongs to an FPA pool. Return non-zero
 * if the supplied pointer is inside the memory controlled by
 * an FPA pool.
 *
 * @pool: Pool to check
 * @ptr:  Pointer to check
 * Returns Non-zero if pointer is in the pool. Zero if not
 */
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
	return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
		((char *)ptr <
		 ((char *)(cvmx_fpa_pool_info[pool].base)) +
		 cvmx_fpa_pool_info[pool].size *
		 cvmx_fpa_pool_info[pool].starting_element_count));
}

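/*
 * Illustrative usage sketch (not part of the original header): validate a
 * pointer against a pool before handing it back. The pool number and the
 * zero cache-line count are assumptions for the example; cvmx_fpa_free()
 * is declared later in this file.
 *
 *	if (cvmx_fpa_is_member(pool, ptr))
 *		cvmx_fpa_free(ptr, pool, 0);
 *	else
 *		cvmx_dprintf("Pointer %p is not from FPA pool %llu\n",
 *			     ptr, (unsigned long long)pool);
 */
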
/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;

	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	if (status.s.enb) {
		cvmx_dprintf
		    ("Warning: Enabling FPA when FPA already enabled.\n");
	}

	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;
		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}

		/* Enforce a 10 cycle delay between config and enable */
		cvmx_wait(10);
	}

	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}

/**
 * Get a new block from the FPA
 *
 * @pool: Pool to get the block from
 * Returns Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
	uint64_t address =
	    cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
	if (address)
		return cvmx_phys_to_ptr(address);
	else
		return NULL;
}

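/*
 * Illustrative usage sketch (not part of the original header): a typical
 * synchronous allocate/use/free cycle on a pool that has already been
 * configured with cvmx_fpa_setup_pool() and enabled with cvmx_fpa_enable().
 * The pool number and the zero cache-line count are assumptions for the
 * example.
 *
 *	void *buf = cvmx_fpa_alloc(pool);
 *
 *	if (buf) {
 *		... fill or modify the buffer ...
 *		cvmx_fpa_free(buf, pool, 0);
 *	}
 */
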
/**
 * Asynchronously get a new block from the FPA
 *
 * @scr_addr: Local scratch address to put response in. This is a byte address,
 * but must be 8 byte aligned.
 * @pool: Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
	cvmx_fpa_iobdma_data_t data;

	/*
	 * Hardware only uses 64 bit aligned locations, so convert
	 * from byte address to 64-bit index
	 */
	data.s.scraddr = scr_addr >> 3;
	data.s.len = 1;
	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	data.s.addr = 0;
	cvmx_send_single(data.u64);
}

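/*
 * Illustrative usage sketch (not part of the original header): issue an
 * asynchronous allocation, overlap it with other work, then pick the
 * physical address of the buffer out of the scratchpad. CVMX_SYNCIOBDMA
 * (cvmx-asm.h) and cvmx_scratch_read64() (cvmx-scratch.h) are assumed to
 * be available to the caller; the scratch offset of 8 and the pool number
 * are assumptions for the example.
 *
 *	cvmx_fpa_async_alloc(8, pool);
 *	... do unrelated work while the IOBDMA completes ...
 *	CVMX_SYNCIOBDMA;
 *	uint64_t phys = cvmx_scratch_read64(8);
 *	void *buf = phys ? cvmx_phys_to_ptr(phys) : NULL;
 */
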
/**
 * Free a block allocated with an FPA pool. Does NOT provide memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:  Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *	  Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
					uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	barrier();
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

/**
 * Free a block allocated with an FPA pool. Provides the required memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @ptr:  Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *	  Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
				 uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/*
	 * Make sure that any previous writes to memory go out before
	 * we free this buffer. This also serves as a barrier to
	 * prevent GCC from reordering operations to after the
	 * free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}

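/*
 * Illustrative sketch (not part of the original header) of choosing between
 * the two free variants: cvmx_fpa_free() orders prior stores with SYNCWS so
 * they reach memory before the hardware can hand the buffer out again, while
 * cvmx_fpa_free_nosync() is only safe when this core never wrote to the
 * buffer. The condition, pool number and cache-line count are assumptions
 * for the example.
 *
 *	if (core_modified_buffer)
 *		cvmx_fpa_free(buf, pool, 0);
 *	else
 *		cvmx_fpa_free_nosync(buf, pool, 0);
 */
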
/**
 * Set up an FPA pool to control a new block of memory.
 * This can only be called once per pool. Make sure proper
 * locking enforces this.
 *
 * @pool:       Pool to initialize
 *              0 <= pool < 8
 * @name:       Constant character string to name this pool.
 *              String is not copied.
 * @buffer:     Pointer to the block of memory to use. This must be
 *              accessible by all processors and external hardware.
 * @block_size: Size for each block controlled by the FPA
 * @num_blocks: Number of blocks
 *
 * Returns 0 on Success,
 *         -1 on failure
 */
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
			       uint64_t block_size, uint64_t num_blocks);

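/*
 * Illustrative bring-up sketch (not part of the original header), following
 * the ordering documented for cvmx_fpa_enable() above: enable the FPA, then
 * hand it a block of memory carved into fixed-size buffers. The use of
 * cvmx_bootmem_alloc() from cvmx-bootmem.h for aligned, globally visible
 * memory, and the pool number, block size and block count, are assumptions
 * for the example.
 *
 *	#define EXAMPLE_POOL	1
 *	#define EXAMPLE_SIZE	256
 *	#define EXAMPLE_COUNT	1024
 *
 *	void *mem = cvmx_bootmem_alloc(EXAMPLE_SIZE * EXAMPLE_COUNT,
 *				       CVMX_FPA_ALIGNMENT);
 *
 *	cvmx_fpa_enable();
 *	if (!mem || cvmx_fpa_setup_pool(EXAMPLE_POOL, "Example buffers", mem,
 *					EXAMPLE_SIZE, EXAMPLE_COUNT) < 0)
 *		cvmx_dprintf("FPA pool setup failed\n");
 */
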
/**
 * Shut down a memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool.
 *
 * @pool: Pool to shut down
 * Returns Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 */
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);

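/*
 * Illustrative teardown sketch (not part of the original header): after all
 * hardware units have stopped using the pool, one core drains it and checks
 * that every buffer came back. The pool number is an assumption for the
 * example.
 *
 *	uint64_t missing = cvmx_fpa_shutdown_pool(pool);
 *
 *	if (missing)
 *		cvmx_dprintf("FPA pool %llu shutdown reported %llu\n",
 *			     (unsigned long long)pool,
 *			     (unsigned long long)missing);
 */
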
/**
 * Get the size of blocks controlled by the pool
 * This is resolved to a constant at compile time.
 *
 * @pool: Pool to access
 * Returns Size of the block in bytes
 */
uint64_t cvmx_fpa_get_block_size(uint64_t pool);

#endif /* __CVMX_FPA_H__ */