]>
Commit | Line | Data |
---|---|---|
f21fb3ed RV |
1 | /********************************************************************** |
2 | * Author: Cavium, Inc. | |
3 | * | |
4 | * Contact: support@cavium.com | |
5 | * Please include "LiquidIO" in the subject. | |
6 | * | |
7 | * Copyright (c) 2003-2015 Cavium, Inc. | |
8 | * | |
9 | * This file is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License, Version 2, as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This file is distributed in the hope that it will be useful, but | |
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | |
16 | * NONINFRINGEMENT. See the GNU General Public License for more | |
17 | * details. | |
18 | * | |
19 | * This file may also be available under a different license from Cavium. | |
20 | * Contact Cavium, Inc. for more information | |
21 | **********************************************************************/ | |
f21fb3ed | 22 | #include <linux/pci.h> |
f21fb3ed | 23 | #include <linux/netdevice.h> |
5b173cf9 | 24 | #include <linux/vmalloc.h> |
f21fb3ed RV |
25 | #include "liquidio_common.h" |
26 | #include "octeon_droq.h" | |
27 | #include "octeon_iq.h" | |
28 | #include "response_manager.h" | |
29 | #include "octeon_device.h" | |
f21fb3ed RV |
30 | #include "octeon_main.h" |
31 | #include "octeon_network.h" | |
32 | #include "cn66xx_regs.h" | |
33 | #include "cn66xx_device.h" | |
e86b1ab6 | 34 | #include "cn23xx_pf_device.h" |
f21fb3ed RV |
35 | |
/** Default configuration
 * for the CN66XX-based LiquidIO 210SV (two NIC ports).
 */
static struct octeon_config default_cn66xx_conf = {
	.card_type = LIO_210SV,
	.card_name = LIO_210SV_NAME,

	/** IQ (input/instruction queue) attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ (output/DROQ) attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	/* Ethernet interface 1: identical queue/ring shape, GMX port 1. */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		/* Output-queue backpressure off by default */
		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
144 | ||
/** Default configuration
 * for the CN68XX-based LiquidIO 410NV (four NIC ports).
 */
static struct octeon_config default_cn68xx_conf = {
	.card_type = LIO_410NV,
	.card_name = LIO_410NV_NAME,

	/** IQ (input/instruction queue) attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ (output/DROQ) attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	/* Per-interface config: all four ports share the same queue/ring
	 * shape and differ only in gmx_port_id.
	 */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	.nic_if_cfg[2] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 2,
	},

	.nic_if_cfg[3] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 3,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		/* Output-queue backpressure off by default */
		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
311 | ||
/** Default configuration
 * for the CN68XX-based LiquidIO 210NV variant (two NIC ports).
 */
static struct octeon_config default_cn68xx_210nv_conf = {
	.card_type = LIO_210NV,
	.card_name = LIO_210NV_NAME,

	/** IQ (input/instruction queue) attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ (output/DROQ) attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		/* Output-queue backpressure off by default */
		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
420 | ||
e86b1ab6 RV |
/** Default configuration for CN23XX-based adapters (PF). */
static struct octeon_config default_cn23xx_conf = {
	.card_type = LIO_23XX,
	.card_name = LIO_23XX_NAME,
	/** IQ (input/instruction queue) attributes */
	.iq = {
		.max_iqs = CN23XX_CFG_IO_QUEUES,
		.pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS *
				      CN23XX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN23XX_DB_MIN,
		.db_timeout = CN23XX_DB_TIMEOUT,
		.iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD,
	},

	/** OQ (output/DROQ) attributes */
	.oq = {
		.max_oqs = CN23XX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
		.refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN23XX_OQ_INTR_PKT,
		.oq_intr_time = CN23XX_OQ_INTR_TIME,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
	.num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN23XX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN23XX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	/* Ethernet interface 1: identical queue/ring shape, GMX port 1. */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports :max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports :max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,

		/* SKB size, We need not change buf size even for Jumbo frames.
		 * Octeon can send jumbo frames in 4 consecutive descriptors,
		 */
		.rx_buf_size = CN23XX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		/* Output-queue backpressure off by default */
		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	}
};
522 | ||
f21fb3ed RV |
/* Selectable configuration profiles; only the default exists today. */
enum {
	OCTEON_CONFIG_TYPE_DEFAULT = 0,
	NUM_OCTEON_CONFS,
};

/* Per-device configuration selector, indexed by octeon_id.  All slots
 * start out using the default profile.
 */
static struct octeon_config_ptr {
	u32 conf_type;
} oct_conf_info[MAX_OCTEON_DEVICES] = {
	{
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	},
};
541 | ||
/* Human-readable names for the device states; the extra trailing entry
 * ("INVALID") is returned for out-of-range state values.
 */
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

/* Names for the application modes; extra entry is the unknown fallback. */
static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
	"BASE", "NIC", "UNKNOWN"};

/* Global table of allocated devices and the number of live entries. */
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;

/* Per-device core setup records. */
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
557 | ||
5b173cf9 | 558 | static void oct_set_config_info(int oct_id, int conf_type) |
f21fb3ed RV |
559 | { |
560 | if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1)) | |
561 | conf_type = OCTEON_CONFIG_TYPE_DEFAULT; | |
562 | oct_conf_info[oct_id].conf_type = conf_type; | |
563 | } | |
564 | ||
565 | void octeon_init_device_list(int conf_type) | |
566 | { | |
567 | int i; | |
568 | ||
569 | memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES)); | |
570 | for (i = 0; i < MAX_OCTEON_DEVICES; i++) | |
571 | oct_set_config_info(i, conf_type); | |
572 | } | |
573 | ||
574 | static void *__retrieve_octeon_config_info(struct octeon_device *oct, | |
575 | u16 card_type) | |
576 | { | |
577 | u32 oct_id = oct->octeon_id; | |
578 | void *ret = NULL; | |
579 | ||
580 | switch (oct_conf_info[oct_id].conf_type) { | |
581 | case OCTEON_CONFIG_TYPE_DEFAULT: | |
582 | if (oct->chip_id == OCTEON_CN66XX) { | |
583 | ret = (void *)&default_cn66xx_conf; | |
584 | } else if ((oct->chip_id == OCTEON_CN68XX) && | |
585 | (card_type == LIO_210NV)) { | |
586 | ret = (void *)&default_cn68xx_210nv_conf; | |
587 | } else if ((oct->chip_id == OCTEON_CN68XX) && | |
588 | (card_type == LIO_410NV)) { | |
589 | ret = (void *)&default_cn68xx_conf; | |
e86b1ab6 RV |
590 | } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { |
591 | ret = (void *)&default_cn23xx_conf; | |
f21fb3ed RV |
592 | } |
593 | break; | |
594 | default: | |
595 | break; | |
596 | } | |
597 | return ret; | |
598 | } | |
599 | ||
600 | static int __verify_octeon_config_info(struct octeon_device *oct, void *conf) | |
601 | { | |
602 | switch (oct->chip_id) { | |
603 | case OCTEON_CN66XX: | |
604 | case OCTEON_CN68XX: | |
605 | return lio_validate_cn6xxx_config_info(oct, conf); | |
e86b1ab6 RV |
606 | case OCTEON_CN23XX_PF_VID: |
607 | return 0; | |
f21fb3ed RV |
608 | default: |
609 | break; | |
610 | } | |
611 | ||
612 | return 1; | |
613 | } | |
614 | ||
615 | void *oct_get_config_info(struct octeon_device *oct, u16 card_type) | |
616 | { | |
617 | void *conf = NULL; | |
618 | ||
619 | conf = __retrieve_octeon_config_info(oct, card_type); | |
620 | if (!conf) | |
621 | return NULL; | |
622 | ||
623 | if (__verify_octeon_config_info(oct, conf)) { | |
624 | dev_err(&oct->pci_dev->dev, "Configuration verification failed\n"); | |
625 | return NULL; | |
626 | } | |
627 | ||
628 | return conf; | |
629 | } | |
630 | ||
631 | char *lio_get_state_string(atomic_t *state_ptr) | |
632 | { | |
633 | s32 istate = (s32)atomic_read(state_ptr); | |
634 | ||
635 | if (istate > OCT_DEV_STATES || istate < 0) | |
636 | return oct_dev_state_str[OCT_DEV_STATE_INVALID]; | |
637 | return oct_dev_state_str[istate]; | |
638 | } | |
639 | ||
640 | static char *get_oct_app_string(u32 app_mode) | |
641 | { | |
642 | if (app_mode <= CVM_DRV_APP_END) | |
643 | return oct_dev_app_str[app_mode - CVM_DRV_APP_START]; | |
644 | return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; | |
645 | } | |
646 | ||
f21fb3ed RV |
/* Free the per-queue memory and the device structure itself, then clear
 * the slot in the global device table.
 */
void octeon_free_device_mem(struct octeon_device *oct)
{
	int i;

	/* Only free queues whose bit is set in the allocation mask. */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (oct->io_qmask.oq & (1ULL << i))
			vfree(oct->droq[i]);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (oct->io_qmask.iq & (1ULL << i))
			vfree(oct->instr_queue[i]);
	}

	/* Save the slot id before oct itself is freed. */
	i = oct->octeon_id;
	vfree(oct);

	octeon_device[i] = NULL;
	octeon_device_count--;
}
667 | ||
668 | static struct octeon_device *octeon_allocate_device_mem(u32 pci_id, | |
669 | u32 priv_size) | |
670 | { | |
671 | struct octeon_device *oct; | |
672 | u8 *buf = NULL; | |
673 | u32 octdevsize = 0, configsize = 0, size; | |
674 | ||
675 | switch (pci_id) { | |
676 | case OCTEON_CN68XX: | |
677 | case OCTEON_CN66XX: | |
678 | configsize = sizeof(struct octeon_cn6xxx); | |
679 | break; | |
680 | ||
e86b1ab6 RV |
681 | case OCTEON_CN23XX_PF_VID: |
682 | configsize = sizeof(struct octeon_cn23xx_pf); | |
683 | break; | |
f21fb3ed RV |
684 | default: |
685 | pr_err("%s: Unknown PCI Device: 0x%x\n", | |
686 | __func__, | |
687 | pci_id); | |
688 | return NULL; | |
689 | } | |
690 | ||
691 | if (configsize & 0x7) | |
692 | configsize += (8 - (configsize & 0x7)); | |
693 | ||
694 | octdevsize = sizeof(struct octeon_device); | |
695 | if (octdevsize & 0x7) | |
696 | octdevsize += (8 - (octdevsize & 0x7)); | |
697 | ||
698 | if (priv_size & 0x7) | |
699 | priv_size += (8 - (priv_size & 0x7)); | |
700 | ||
701 | size = octdevsize + priv_size + configsize + | |
702 | (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE); | |
703 | ||
704 | buf = vmalloc(size); | |
705 | if (!buf) | |
706 | return NULL; | |
707 | ||
708 | memset(buf, 0, size); | |
709 | ||
710 | oct = (struct octeon_device *)buf; | |
711 | oct->priv = (void *)(buf + octdevsize); | |
712 | oct->chip = (void *)(buf + octdevsize + priv_size); | |
713 | oct->dispatch.dlist = (struct octeon_dispatch *) | |
714 | (buf + octdevsize + priv_size + configsize); | |
715 | ||
716 | return oct; | |
717 | } | |
718 | ||
719 | struct octeon_device *octeon_allocate_device(u32 pci_id, | |
720 | u32 priv_size) | |
721 | { | |
722 | u32 oct_idx = 0; | |
723 | struct octeon_device *oct = NULL; | |
724 | ||
725 | for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++) | |
726 | if (!octeon_device[oct_idx]) | |
727 | break; | |
728 | ||
729 | if (oct_idx == MAX_OCTEON_DEVICES) | |
730 | return NULL; | |
731 | ||
732 | oct = octeon_allocate_device_mem(pci_id, priv_size); | |
733 | if (!oct) | |
734 | return NULL; | |
735 | ||
736 | spin_lock_init(&oct->pci_win_lock); | |
737 | spin_lock_init(&oct->mem_access_lock); | |
738 | ||
739 | octeon_device_count++; | |
740 | octeon_device[oct_idx] = oct; | |
741 | ||
742 | oct->octeon_id = oct_idx; | |
63da8404 | 743 | snprintf(oct->device_name, sizeof(oct->device_name), |
f21fb3ed RV |
744 | "LiquidIO%d", (oct->octeon_id)); |
745 | ||
746 | return oct; | |
747 | } | |
748 | ||
5b07aee1 RV |
749 | int |
750 | octeon_allocate_ioq_vector(struct octeon_device *oct) | |
751 | { | |
752 | int i, num_ioqs = 0; | |
753 | struct octeon_ioq_vector *ioq_vector; | |
754 | int cpu_num; | |
755 | int size; | |
756 | ||
757 | if (OCTEON_CN23XX_PF(oct)) | |
758 | num_ioqs = oct->sriov_info.num_pf_rings; | |
759 | size = sizeof(struct octeon_ioq_vector) * num_ioqs; | |
760 | ||
761 | oct->ioq_vector = vmalloc(size); | |
762 | if (!oct->ioq_vector) | |
763 | return 1; | |
764 | memset(oct->ioq_vector, 0, size); | |
765 | for (i = 0; i < num_ioqs; i++) { | |
766 | ioq_vector = &oct->ioq_vector[i]; | |
767 | ioq_vector->oct_dev = oct; | |
768 | ioq_vector->iq_index = i; | |
769 | ioq_vector->droq_index = i; | |
5d65556b | 770 | ioq_vector->mbox = oct->mbox[i]; |
5b07aee1 RV |
771 | |
772 | cpu_num = i % num_online_cpus(); | |
773 | cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask); | |
774 | ||
775 | if (oct->chip_id == OCTEON_CN23XX_PF_VID) | |
776 | ioq_vector->ioq_num = i + oct->sriov_info.pf_srn; | |
777 | else | |
778 | ioq_vector->ioq_num = i; | |
779 | } | |
780 | return 0; | |
781 | } | |
782 | ||
783 | void | |
784 | octeon_free_ioq_vector(struct octeon_device *oct) | |
785 | { | |
786 | vfree(oct->ioq_vector); | |
787 | } | |
788 | ||
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
	u32 num_descs = 0;
	u32 iq_no = 0;
	union oct_txpciq txpciq;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	/* Pick the chip family's default TX descriptor count. */
	if (OCTEON_CN6XXX(oct))
		num_descs =
			CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
	else if (OCTEON_CN23XX_PF(oct))
		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
								conf));

	oct->num_iqs = 0;

	/* Try a NUMA-local allocation first, then fall back to any node. */
	oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
				numa_node);
	if (!oct->instr_queue[0])
		oct->instr_queue[0] =
			vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[0])
		return 1;
	memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
	oct->instr_queue[0]->q_index = 0;
	oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
	oct->instr_queue[0]->ifidx = 0;
	/* Build the tx PCI queue descriptor for queue 0. */
	txpciq.u64 = 0;
	txpciq.s.q_no = iq_no;
	txpciq.s.pkind = oct->pfvf_hsword.pkind;
	txpciq.s.use_qpg = 0;
	txpciq.s.qpg = 0;
	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		/* prevent memory leak */
		vfree(oct->instr_queue[0]);
		return 1;
	}

	oct->num_iqs++;
	return 0;
}
831 | ||
832 | int octeon_setup_output_queues(struct octeon_device *oct) | |
833 | { | |
f21fb3ed RV |
834 | u32 num_descs = 0; |
835 | u32 desc_size = 0; | |
96ae48b7 RV |
836 | u32 oq_no = 0; |
837 | int numa_node = cpu_to_node(oq_no % num_online_cpus()); | |
f21fb3ed | 838 | |
f21fb3ed | 839 | if (OCTEON_CN6XXX(oct)) { |
f21fb3ed RV |
840 | num_descs = |
841 | CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); | |
842 | desc_size = | |
843 | CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf)); | |
e86b1ab6 RV |
844 | } else if (OCTEON_CN23XX_PF(oct)) { |
845 | num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn23xx_pf, | |
846 | conf)); | |
847 | desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn23xx_pf, | |
848 | conf)); | |
f21fb3ed | 849 | } |
f21fb3ed | 850 | oct->num_oqs = 0; |
96ae48b7 RV |
851 | oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node); |
852 | if (!oct->droq[0]) | |
853 | oct->droq[0] = vmalloc(sizeof(*oct->droq[0])); | |
854 | if (!oct->droq[0]) | |
855 | return 1; | |
f21fb3ed | 856 | |
96ae48b7 RV |
857 | if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) |
858 | return 1; | |
859 | oct->num_oqs++; | |
f21fb3ed RV |
860 | |
861 | return 0; | |
862 | } | |
863 | ||
864 | void octeon_set_io_queues_off(struct octeon_device *oct) | |
865 | { | |
5b823514 RV |
866 | if (OCTEON_CN6XXX(oct)) { |
867 | octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); | |
868 | octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); | |
869 | } | |
f21fb3ed RV |
870 | } |
871 | ||
872 | void octeon_set_droq_pkt_op(struct octeon_device *oct, | |
873 | u32 q_no, | |
874 | u32 enable) | |
875 | { | |
876 | u32 reg_val = 0; | |
877 | ||
878 | /* Disable the i/p and o/p queues for this Octeon. */ | |
5b823514 RV |
879 | if (OCTEON_CN6XXX(oct)) { |
880 | reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); | |
f21fb3ed | 881 | |
5b823514 RV |
882 | if (enable) |
883 | reg_val = reg_val | (1 << q_no); | |
884 | else | |
885 | reg_val = reg_val & (~(1 << q_no)); | |
f21fb3ed | 886 | |
5b823514 RV |
887 | octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val); |
888 | } | |
f21fb3ed RV |
889 | } |
890 | ||
891 | int octeon_init_dispatch_list(struct octeon_device *oct) | |
892 | { | |
893 | u32 i; | |
894 | ||
895 | oct->dispatch.count = 0; | |
896 | ||
897 | for (i = 0; i < DISPATCH_LIST_SIZE; i++) { | |
898 | oct->dispatch.dlist[i].opcode = 0; | |
899 | INIT_LIST_HEAD(&oct->dispatch.dlist[i].list); | |
900 | } | |
901 | ||
902 | for (i = 0; i <= REQTYPE_LAST; i++) | |
903 | octeon_register_reqtype_free_fn(oct, i, NULL); | |
904 | ||
905 | spin_lock_init(&oct->dispatch.lock); | |
906 | ||
907 | return 0; | |
908 | } | |
909 | ||
/* Tear down the dispatch table, freeing every chained entry. */
void octeon_delete_dispatch_list(struct octeon_device *oct)
{
	u32 i;
	struct list_head freelist, *temp, *tmp2;

	INIT_LIST_HEAD(&freelist);

	spin_lock_bh(&oct->dispatch.lock);

	/* Move all chained entries onto a private list while the lock is
	 * held, so vfree() can run after the lock is dropped.
	 */
	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		struct list_head *dispatch;

		dispatch = &oct->dispatch.dlist[i].list;
		while (dispatch->next != dispatch) {
			temp = dispatch->next;
			list_del(temp);
			list_add_tail(temp, &freelist);
		}

		oct->dispatch.dlist[i].opcode = 0;
	}

	oct->dispatch.count = 0;

	spin_unlock_bh(&oct->dispatch.lock);

	/* Free the detached entries outside the lock. */
	list_for_each_safe(temp, tmp2, &freelist) {
		list_del(temp);
		vfree(temp);
	}
}
941 | ||
942 | octeon_dispatch_fn_t | |
943 | octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode, | |
944 | u16 subcode) | |
945 | { | |
946 | u32 idx; | |
947 | struct list_head *dispatch; | |
948 | octeon_dispatch_fn_t fn = NULL; | |
949 | u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); | |
950 | ||
951 | idx = combined_opcode & OCTEON_OPCODE_MASK; | |
952 | ||
953 | spin_lock_bh(&octeon_dev->dispatch.lock); | |
954 | ||
955 | if (octeon_dev->dispatch.count == 0) { | |
956 | spin_unlock_bh(&octeon_dev->dispatch.lock); | |
957 | return NULL; | |
958 | } | |
959 | ||
960 | if (!(octeon_dev->dispatch.dlist[idx].opcode)) { | |
961 | spin_unlock_bh(&octeon_dev->dispatch.lock); | |
962 | return NULL; | |
963 | } | |
964 | ||
965 | if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { | |
966 | fn = octeon_dev->dispatch.dlist[idx].dispatch_fn; | |
967 | } else { | |
968 | list_for_each(dispatch, | |
969 | &octeon_dev->dispatch.dlist[idx].list) { | |
970 | if (((struct octeon_dispatch *)dispatch)->opcode == | |
971 | combined_opcode) { | |
972 | fn = ((struct octeon_dispatch *) | |
973 | dispatch)->dispatch_fn; | |
974 | break; | |
975 | } | |
976 | } | |
977 | } | |
978 | ||
979 | spin_unlock_bh(&octeon_dev->dispatch.lock); | |
980 | return fn; | |
981 | } | |
982 | ||
983 | /* octeon_register_dispatch_fn | |
984 | * Parameters: | |
985 | * octeon_id - id of the octeon device. | |
986 | * opcode - opcode for which driver should call the registered function | |
987 | * subcode - subcode for which driver should call the registered function | |
988 | * fn - The function to call when a packet with "opcode" arrives in | |
989 | * octeon output queues. | |
990 | * fn_arg - The argument to be passed when calling function "fn". | |
991 | * Description: | |
992 | * Registers a function and its argument to be called when a packet | |
993 | * arrives in Octeon output queues with "opcode". | |
994 | * Returns: | |
995 | * Success: 0 | |
996 | * Failure: 1 | |
997 | * Locks: | |
998 | * No locks are held. | |
999 | */ | |
1000 | int | |
1001 | octeon_register_dispatch_fn(struct octeon_device *oct, | |
1002 | u16 opcode, | |
1003 | u16 subcode, | |
1004 | octeon_dispatch_fn_t fn, void *fn_arg) | |
1005 | { | |
1006 | u32 idx; | |
1007 | octeon_dispatch_fn_t pfn; | |
1008 | u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); | |
1009 | ||
1010 | idx = combined_opcode & OCTEON_OPCODE_MASK; | |
1011 | ||
1012 | spin_lock_bh(&oct->dispatch.lock); | |
1013 | /* Add dispatch function to first level of lookup table */ | |
1014 | if (oct->dispatch.dlist[idx].opcode == 0) { | |
1015 | oct->dispatch.dlist[idx].opcode = combined_opcode; | |
1016 | oct->dispatch.dlist[idx].dispatch_fn = fn; | |
1017 | oct->dispatch.dlist[idx].arg = fn_arg; | |
1018 | oct->dispatch.count++; | |
1019 | spin_unlock_bh(&oct->dispatch.lock); | |
1020 | return 0; | |
1021 | } | |
1022 | ||
1023 | spin_unlock_bh(&oct->dispatch.lock); | |
1024 | ||
1025 | /* Check if there was a function already registered for this | |
1026 | * opcode/subcode. | |
1027 | */ | |
1028 | pfn = octeon_get_dispatch(oct, opcode, subcode); | |
1029 | if (!pfn) { | |
1030 | struct octeon_dispatch *dispatch; | |
1031 | ||
1032 | dev_dbg(&oct->pci_dev->dev, | |
1033 | "Adding opcode to dispatch list linked list\n"); | |
1034 | dispatch = (struct octeon_dispatch *) | |
1035 | vmalloc(sizeof(struct octeon_dispatch)); | |
1036 | if (!dispatch) { | |
1037 | dev_err(&oct->pci_dev->dev, | |
1038 | "No memory to add dispatch function\n"); | |
1039 | return 1; | |
1040 | } | |
1041 | dispatch->opcode = combined_opcode; | |
1042 | dispatch->dispatch_fn = fn; | |
1043 | dispatch->arg = fn_arg; | |
1044 | ||
1045 | /* Add dispatch function to linked list of fn ptrs | |
1046 | * at the hashed index. | |
1047 | */ | |
1048 | spin_lock_bh(&oct->dispatch.lock); | |
1049 | list_add(&dispatch->list, &oct->dispatch.dlist[idx].list); | |
1050 | oct->dispatch.count++; | |
1051 | spin_unlock_bh(&oct->dispatch.lock); | |
1052 | ||
1053 | } else { | |
1054 | dev_err(&oct->pci_dev->dev, | |
1055 | "Found previously registered dispatch fn for opcode/subcode: %x/%x\n", | |
1056 | opcode, subcode); | |
1057 | return 1; | |
1058 | } | |
1059 | ||
1060 | return 0; | |
1061 | } | |
1062 | ||
f21fb3ed RV |
1063 | int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) |
1064 | { | |
1065 | u32 i; | |
1066 | char app_name[16]; | |
1067 | struct octeon_device *oct = (struct octeon_device *)buf; | |
1068 | struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; | |
1069 | struct octeon_core_setup *cs = NULL; | |
1070 | u32 num_nic_ports = 0; | |
1071 | ||
1072 | if (OCTEON_CN6XXX(oct)) | |
1073 | num_nic_ports = | |
1074 | CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf)); | |
e86b1ab6 RV |
1075 | else if (OCTEON_CN23XX_PF(oct)) |
1076 | num_nic_ports = | |
1077 | CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn23xx_pf, conf)); | |
f21fb3ed RV |
1078 | |
1079 | if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) { | |
1080 | dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n", | |
1081 | atomic_read(&oct->status)); | |
1082 | goto core_drv_init_err; | |
1083 | } | |
1084 | ||
1085 | strncpy(app_name, | |
1086 | get_oct_app_string( | |
1087 | (u32)recv_pkt->rh.r_core_drv_init.app_mode), | |
1088 | sizeof(app_name) - 1); | |
1089 | oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; | |
5b173cf9 | 1090 | if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) { |
f21fb3ed RV |
1091 | oct->fw_info.max_nic_ports = |
1092 | (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports; | |
1093 | oct->fw_info.num_gmx_ports = | |
1094 | (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports; | |
5b173cf9 | 1095 | } |
f21fb3ed RV |
1096 | |
1097 | if (oct->fw_info.max_nic_ports < num_nic_ports) { | |
1098 | dev_err(&oct->pci_dev->dev, | |
1099 | "Config has more ports than firmware allows (%d > %d).\n", | |
1100 | num_nic_ports, oct->fw_info.max_nic_ports); | |
1101 | goto core_drv_init_err; | |
1102 | } | |
1103 | oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags; | |
1104 | oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; | |
5b823514 RV |
1105 | oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; |
1106 | ||
1107 | oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind; | |
1108 | ||
1109 | for (i = 0; i < oct->num_iqs; i++) | |
1110 | oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind; | |
f21fb3ed RV |
1111 | |
1112 | atomic_set(&oct->status, OCT_DEV_CORE_OK); | |
1113 | ||
1114 | cs = &core_setup[oct->octeon_id]; | |
1115 | ||
1116 | if (recv_pkt->buffer_size[0] != sizeof(*cs)) { | |
1117 | dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n", | |
1118 | (u32)sizeof(*cs), | |
1119 | recv_pkt->buffer_size[0]); | |
1120 | } | |
1121 | ||
1122 | memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs)); | |
1123 | strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME); | |
1124 | strncpy(oct->boardinfo.serial_number, cs->board_serial_number, | |
1125 | OCT_SERIAL_LEN); | |
1126 | ||
1127 | octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3)); | |
1128 | ||
1129 | oct->boardinfo.major = cs->board_rev_major; | |
1130 | oct->boardinfo.minor = cs->board_rev_minor; | |
1131 | ||
1132 | dev_info(&oct->pci_dev->dev, | |
1133 | "Running %s (%llu Hz)\n", | |
1134 | app_name, CVM_CAST64(cs->corefreq)); | |
1135 | ||
1136 | core_drv_init_err: | |
1137 | for (i = 0; i < recv_pkt->buffer_count; i++) | |
1138 | recv_buffer_free(recv_pkt->buffer_ptr[i]); | |
1139 | octeon_free_recv_info(recv_info); | |
1140 | return 0; | |
1141 | } | |
1142 | ||
1143 | int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) | |
1144 | ||
1145 | { | |
63da8404 RV |
1146 | if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) && |
1147 | (oct->io_qmask.iq & (1ULL << q_no))) | |
f21fb3ed RV |
1148 | return oct->instr_queue[q_no]->max_count; |
1149 | ||
1150 | return -1; | |
1151 | } | |
1152 | ||
1153 | int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) | |
1154 | { | |
63da8404 RV |
1155 | if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) && |
1156 | (oct->io_qmask.oq & (1ULL << q_no))) | |
f21fb3ed RV |
1157 | return oct->droq[q_no]->max_count; |
1158 | return -1; | |
1159 | } | |
1160 | ||
1161 | /* Retruns the host firmware handshake OCTEON specific configuration */ | |
1162 | struct octeon_config *octeon_get_conf(struct octeon_device *oct) | |
1163 | { | |
1164 | struct octeon_config *default_oct_conf = NULL; | |
1165 | ||
1166 | /* check the OCTEON Device model & return the corresponding octeon | |
1167 | * configuration | |
1168 | */ | |
1169 | ||
1170 | if (OCTEON_CN6XXX(oct)) { | |
1171 | default_oct_conf = | |
1172 | (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf)); | |
e86b1ab6 RV |
1173 | } else if (OCTEON_CN23XX_PF(oct)) { |
1174 | default_oct_conf = (struct octeon_config *) | |
1175 | (CHIP_FIELD(oct, cn23xx_pf, conf)); | |
f21fb3ed | 1176 | } |
f21fb3ed RV |
1177 | return default_oct_conf; |
1178 | } | |
1179 | ||
1180 | /* scratch register address is same in all the OCT-II and CN70XX models */ | |
1181 | #define CNXX_SLI_SCRATCH1 0x3C0 | |
1182 | ||
1183 | /** Get the octeon device pointer. | |
1184 | * @param octeon_id - The id for which the octeon device pointer is required. | |
1185 | * @return Success: Octeon device pointer. | |
1186 | * @return Failure: NULL. | |
1187 | */ | |
1188 | struct octeon_device *lio_get_device(u32 octeon_id) | |
1189 | { | |
1190 | if (octeon_id >= MAX_OCTEON_DEVICES) | |
1191 | return NULL; | |
1192 | else | |
1193 | return octeon_device[octeon_id]; | |
1194 | } | |
1195 | ||
/* Perform a 64-bit windowed read of an on-chip register through the PCI
 * window registers. The hi-address / lo-address / data sequence is
 * serialized with pci_win_lock (irqsave, so callable from any context).
 * Do NOT reorder the writel/readl pairs: the hardware latches the read
 * when the low address word is written, and each readl flushes the
 * preceding posted write.
 * @param oct  - octeon device whose window registers are used.
 * @param addr - 64-bit on-chip address to read.
 * @return the 64-bit value read.
 */
u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
	u64 val64;
	unsigned long flags;
	u32 val32, addrhi;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	/* The windowed read happens when the LSB of the addr is written.
	 * So write MSB first
	 */
	addrhi = (addr >> 32);
	/* Extra high-address bits required on these models -- presumably
	 * selects the CSR address space; TODO confirm against the HRM.
	 */
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX) ||
	    (oct->chip_id == OCTEON_CN23XX_PF_VID))
		addrhi |= 0x00060000;
	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

	/* Read back to preserve ordering of writes */
	val32 = readl(oct->reg_list.pci_win_rd_addr_hi);

	/* Writing the low word triggers the actual windowed read. */
	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
	val32 = readl(oct->reg_list.pci_win_rd_addr_lo);

	val64 = readq(oct->reg_list.pci_win_rd_data);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);

	return val64;
}
1226 | ||
/* Perform a 64-bit windowed write of an on-chip register through the PCI
 * window registers, serialized with pci_win_lock (irqsave). Do NOT
 * reorder the statements: the hardware commits the write when the low
 * data word is written, so the address and high data word must already
 * be latched, and the readl flushes the posted high-word write.
 * @param oct  - octeon device whose window registers are used.
 * @param val  - 64-bit value to write.
 * @param addr - 64-bit on-chip address to write.
 */
void lio_pci_writeq(struct octeon_device *oct,
		    u64 val,
		    u64 addr)
{
	u32 val32;
	unsigned long flags;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	writeq(addr, oct->reg_list.pci_win_wr_addr);

	/* The write happens when the LSB is written. So write MSB first. */
	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
	/* Read the MSB to ensure ordering of writes. */
	val32 = readl(oct->reg_list.pci_win_wr_data_hi);

	/* This write commits the full 64-bit value to the device. */
	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
1247 | ||
1248 | int octeon_mem_access_ok(struct octeon_device *oct) | |
1249 | { | |
1250 | u64 access_okay = 0; | |
63da8404 | 1251 | u64 lmc0_reset_ctl; |
f21fb3ed RV |
1252 | |
1253 | /* Check to make sure a DDR interface is enabled */ | |
e86b1ab6 RV |
1254 | if (OCTEON_CN23XX_PF(oct)) { |
1255 | lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL); | |
1256 | access_okay = | |
1257 | (lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK); | |
1258 | } else { | |
1259 | lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); | |
1260 | access_okay = | |
1261 | (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK); | |
1262 | } | |
f21fb3ed RV |
1263 | |
1264 | return access_okay ? 0 : 1; | |
1265 | } | |
1266 | ||
1267 | int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout) | |
1268 | { | |
1269 | int ret = 1; | |
1270 | u32 ms; | |
1271 | ||
1272 | if (!timeout) | |
1273 | return ret; | |
1274 | ||
f21fb3ed RV |
1275 | for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout)); |
1276 | ms += HZ / 10) { | |
1277 | ret = octeon_mem_access_ok(oct); | |
1278 | ||
1279 | /* wait 100 ms */ | |
1280 | if (ret) | |
1281 | schedule_timeout_uninterruptible(HZ / 10); | |
1282 | } | |
1283 | ||
1284 | return ret; | |
1285 | } | |
1286 | ||
1287 | /** Get the octeon id assigned to the octeon device passed as argument. | |
1288 | * This function is exported to other modules. | |
1289 | * @param dev - octeon device pointer passed as a void *. | |
1290 | * @return octeon device id | |
1291 | */ | |
1292 | int lio_get_device_id(void *dev) | |
1293 | { | |
1294 | struct octeon_device *octeon_dev = (struct octeon_device *)dev; | |
1295 | u32 i; | |
1296 | ||
1297 | for (i = 0; i < MAX_OCTEON_DEVICES; i++) | |
1298 | if (octeon_device[i] == octeon_dev) | |
1299 | return octeon_dev->octeon_id; | |
1300 | return -1; | |
1301 | } | |
cd8b1eb4 RV |
1302 | |
/* Re-arm interrupts for a DROQ (RX) and/or an instruction queue (TX)
 * after poll-mode processing. Software-accumulated packet counts are
 * acknowledged back to the hardware count registers; on CN23XX PF the
 * RESEND bit is additionally written so packets that arrived while
 * interrupts were masked still raise an interrupt. Either argument may
 * be NULL to skip that side. Statement order is significant: the count
 * acknowledgment must land before the RESEND write.
 */
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
	u64 instr_cnt;
	struct octeon_device *oct = NULL;

	/* the whole thing needs to be atomic, ideally */
	if (droq) {
		spin_lock_bh(&droq->lock);
		/* Acknowledge processed RX packets back to the hardware. */
		writel(droq->pkt_count, droq->pkts_sent_reg);
		droq->pkt_count = 0;
		spin_unlock_bh(&droq->lock);
		oct = droq->oct_dev;
	}
	if (iq) {
		spin_lock_bh(&iq->lock);
		/* Acknowledge completed TX instructions back to hardware. */
		writel(iq->pkt_in_done, iq->inst_cnt_reg);
		iq->pkt_in_done = 0;
		spin_unlock_bh(&iq->lock);
		oct = iq->oct_dev;
	}
	/* Write RESEND. Writing RESEND in SLI_PKTX_CNTS should be enough
	 * to trigger tx interrupts as well, if they are pending.
	 */
	if (oct && OCTEON_CN23XX_PF(oct)) {
		if (droq)
			writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
		/* We race with firmware here: read-modify-write IN_DONE_CNTS
		 * so the low count bits are preserved while setting RESEND.
		 */
		else if (iq) {
			instr_cnt = readq(iq->inst_cnt_reg);
			writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
				CN23XX_INTR_RESEND),
			       iq->inst_cnt_reg);
		}
	}
}