/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */

#include <rte_branch_prediction.h>

#include <fsl_usd.h>
#include <process.h>
#include "bman_priv.h"
#include <sys/ioctl.h>

/*
 * Global variables for the maximum portal/pool number supported by this
 * BMan version.
 */
static u16 bman_ip_rev;
u16 bman_pool_max;
static void *bman_ccsr_map;

/*****************/
/* Portal driver */
/*****************/

static __thread int bmfd = -1;
static __thread struct bm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
        .type = dpaa_portal_bman
};

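/*
 * Map a BMan portal into the calling thread: verify the thread is affine to
 * exactly one CPU, map the portal's cache-enabled/cache-inhibited regions via
 * the process ioctl interface, open the portal IRQ fd and create the
 * thread-affine portal.
 */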
static int fsl_bman_portal_init(uint32_t idx, int is_shared)
{
        cpu_set_t cpuset;
        struct bman_portal *portal;
        int loop, ret;
        struct dpaa_ioctl_irq_map irq_map;

        /* Verify the thread's cpu-affinity */
        ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (ret) {
                error(0, ret, "pthread_getaffinity_np()");
                return ret;
        }
        pcfg.cpu = -1;
        for (loop = 0; loop < CPU_SETSIZE; loop++)
                if (CPU_ISSET(loop, &cpuset)) {
                        if (pcfg.cpu != -1) {
                                pr_err("Thread is not affine to 1 cpu");
                                return -EINVAL;
                        }
                        pcfg.cpu = loop;
                }
        if (pcfg.cpu == -1) {
                pr_err("Bug in getaffinity handling!");
                return -EINVAL;
        }
        /* Allocate and map a bman portal */
        map.index = idx;
        ret = process_portal_map(&map);
        if (ret) {
                error(0, ret, "process_portal_map()");
                return ret;
        }
        /* Make the portal's cache-[enabled|inhibited] regions */
        pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
        pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
        pcfg.is_shared = is_shared;
        pcfg.index = map.index;
        bman_depletion_fill(&pcfg.mask);

        bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
        if (bmfd == -1) {
                pr_err("BMan irq init failed");
                process_portal_unmap(&map.addr);
                return -EBUSY;
        }
        /* Use the IRQ FD as a unique IRQ number */
        pcfg.irq = bmfd;

        portal = bman_create_affine_portal(&pcfg);
        if (!portal) {
                pr_err("Bman portal initialisation failed (%d)",
                       pcfg.cpu);
                process_portal_unmap(&map.addr);
                return -EBUSY;
        }

        /* Set the IRQ number */
        irq_map.type = dpaa_portal_bman;
        irq_map.portal_cinh = map.addr.cinh;
        process_portal_irq_map(bmfd, &irq_map);
        return 0;
}

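/*
 * Undo fsl_bman_portal_init() for the calling thread: unmap the portal IRQ,
 * destroy the thread-affine portal and release the portal mapping.
 */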
static int fsl_bman_portal_finish(void)
{
        __maybe_unused const struct bm_portal_config *cfg;
        int ret;

        process_portal_irq_unmap(bmfd);

        cfg = bman_destroy_affine_portal();
        DPAA_BUG_ON(cfg != &pcfg);
        ret = process_portal_unmap(&map.addr);
        if (ret)
                error(0, ret, "process_portal_unmap()");
        return ret;
}

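/* Return this thread's portal IRQ fd so callers can poll/select on it. */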
int bman_thread_fd(void)
{
        return bmfd;
}

int bman_thread_init(void)
{
        /* Convert from contiguous/virtual cpu numbering to real cpu when
         * calling into the code that is dependent on the device naming.
         */
        return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
}

int bman_thread_finish(void)
{
        return fsl_bman_portal_finish();
}

void bman_thread_irq(void)
{
        qbman_invoke_irq(pcfg.irq);
        /* Now we need to uninhibit interrupts. This is the only code outside
         * the regular portal driver that manipulates any portal register, so
         * rather than breaking that encapsulation I am simply hard-coding the
         * offset to the inhibit register here.
         */
        out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
}

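/*
 * Map the BMan CCSR register block into this process: the physical address
 * comes from the "fsl,bman" device-tree node and the mapping is made through
 * BMAN_CCSR_MAP (/dev/mem). The pool threshold/content accessors below rely
 * on this mapping.
 */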
int bman_init_ccsr(const struct device_node *node)
{
        static int ccsr_map_fd;
        uint64_t phys_addr;
        const uint32_t *bman_addr;
        uint64_t regs_size;

        bman_addr = of_get_address(node, 0, &regs_size, NULL);
        if (!bman_addr) {
                pr_err("of_get_address cannot return BMan address");
                return -EINVAL;
        }
        phys_addr = of_translate_address(node, bman_addr);
        if (!phys_addr) {
                pr_err("of_translate_address failed");
                return -EINVAL;
        }

        ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
        if (unlikely(ccsr_map_fd < 0)) {
                pr_err("Can not open /dev/mem for BMan CCSR map");
                return ccsr_map_fd;
        }

        bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
                             PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
        if (bman_ccsr_map == MAP_FAILED) {
                pr_err("Can not map BMan CCSR base Bman: "
                       "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIx64,
                       *bman_addr, phys_addr, regs_size);
                return -EINVAL;
        }

        return 0;
}

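/*
 * One-time global initialisation: probe the device tree for a BMan portal
 * node to determine the IP revision and the number of buffer pools, then map
 * the BMan CCSR region. Returns -EBUSY if called more than once.
 */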
int bman_global_init(void)
{
        const struct device_node *dt_node;
        static int done;

        if (done)
                return -EBUSY;
        /* Use the device-tree to determine IP revision until something better
         * is devised.
         */
        dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
        if (!dt_node) {
                pr_err("No bman portals available for any CPU\n");
                return -ENODEV;
        }
        if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
            of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
                bman_ip_rev = BMAN_REV10;
                bman_pool_max = 64;
        } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
                   of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
                bman_ip_rev = BMAN_REV20;
                bman_pool_max = 8;
        } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
                   of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
                   of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
                   of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
                bman_ip_rev = BMAN_REV21;
                bman_pool_max = 64;
        } else {
                pr_warn("Unknown BMan version in portal node, defaulting "
                        "to rev1.0");
                bman_ip_rev = BMAN_REV10;
                bman_pool_max = 64;
        }

        if (!bman_ip_rev) {
                pr_err("Unknown bman portal version\n");
                return -ENODEV;
        }
        {
                const struct device_node *dn = of_find_compatible_node(NULL,
                                                        NULL, "fsl,bman");
                if (!dn)
                        pr_err("No bman device node available");

                if (bman_init_ccsr(dn))
                        pr_err("BMan CCSR map failed.");
        }

        done = 1;
        return 0;
}

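/*
 * Per-pool CONTENT register: reports the current number of free buffers held
 * by the pool.
 */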
#define BMAN_POOL_CONTENT(n)    (0x0600 + ((n) * 0x04))
u32 bm_pool_free_buffers(u32 bpid)
{
        return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
}

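/*
 * Encode a threshold value in the hardware's mantissa/exponent format: an
 * 8-bit mantissa in bits 0-7 and a 4-bit exponent in bits 8-11, so the
 * threshold is roughly mantissa << exponent. @roundup rounds the mantissa up
 * whenever precision is lost by the shift.
 */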
static u32 __generate_thresh(u32 val, int roundup)
{
        u32 e = 0;      /* exponent */
        int oddbit = 0;

        while (val > 0xff) {
                oddbit = val & 1;
                val >>= 1;
                e++;
                if (roundup && oddbit)
                        val++;
        }
        DPAA_ASSERT(e < 0x10);
        return (val | (e << 8));
}

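/*
 * Per-pool threshold registers: software/hardware depletion entry
 * (SWDET/HWDET) and exit (SWDXT/HWDXT) thresholds. bm_pool_set() expects
 * thresholds[] in that order: SW entry, SW exit, HW entry, HW exit.
 */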
#define POOL_SWDET(n)   (0x0000 + ((n) * 0x04))
#define POOL_HWDET(n)   (0x0100 + ((n) * 0x04))
#define POOL_SWDXT(n)   (0x0200 + ((n) * 0x04))
#define POOL_HWDXT(n)   (0x0300 + ((n) * 0x04))
int bm_pool_set(u32 bpid, const u32 *thresholds)
{
        if (!bman_ccsr_map)
                return -ENODEV;
        if (bpid >= bman_pool_max)
                return -EINVAL;
        out_be32(bman_ccsr_map + POOL_SWDET(bpid),
                 __generate_thresh(thresholds[0], 0));
        out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
                 __generate_thresh(thresholds[1], 1));
        out_be32(bman_ccsr_map + POOL_HWDET(bpid),
                 __generate_thresh(thresholds[2], 0));
        out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
                 __generate_thresh(thresholds[3], 1));
        return 0;
}

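/*
 * Program only the hardware depletion entry/exit thresholds for a pool. If
 * either argument is zero, both registers fall back to the defaults below.
 */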
#define BMAN_LOW_DEFAULT_THRESH         0x40
#define BMAN_HIGH_DEFAULT_THRESH        0x80
int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
                             const u32 high_thresh)
{
        if (!bman_ccsr_map)
                return -ENODEV;
        if (bpid >= bman_pool_max)
                return -EINVAL;
        if (low_thresh && high_thresh) {
                out_be32(bman_ccsr_map + POOL_HWDET(bpid),
                         __generate_thresh(low_thresh, 0));
                out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
                         __generate_thresh(high_thresh, 1));
        } else {
                out_be32(bman_ccsr_map + POOL_HWDET(bpid),
                         __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
                out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
                         __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
        }
        return 0;
}