/*
 * Copyright (c) 2010 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains support for diagnostic functions. It is accessed by
 * opening the qib_diag device, normally minor number 129. Diagnostic use
 * of the QLogic_IB chip may render the chip or board unusable until the
 * driver is unloaded, or in some cases, until the system is rebooted.
 *
 * Accesses to the chip through this interface are not similar to going
 * through the /sys/bus/pci resource mmap interface.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#include "qib.h"
#include "qib_common.h"

/*
 * Each client that opens the diag device must read then write
 * offset 0, to prevent lossage from random cat or od. diag_state
 * sequences this "handshake".
 */
enum diag_state { UNUSED = 0, OPENED, INIT, READY };
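
/*
 * Illustrative sketch (not part of this driver): a userspace diag client
 * might complete the handshake above by reading and then writing 8 bytes
 * at offset 0 before attempting any other access. The device node path
 * and the use of pread()/pwrite() are assumptions for illustration only;
 * actual diag tools may open and drive the device differently.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int diag_handshake(void)
 *	{
 *		uint64_t v;
 *		int fd = open("/dev/ipath_diag0", O_RDWR);	// assumed node name
 *
 *		if (fd < 0)
 *			return -1;
 *		if (pread(fd, &v, sizeof(v), 0) != sizeof(v))	// OPENED -> INIT
 *			return -1;
 *		if (pwrite(fd, &v, sizeof(v), 0) != sizeof(v))	// INIT -> READY
 *			return -1;
 *		return fd;	// arbitrary aligned reads/writes now allowed
 *	}
 */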

/* State for an individual client. PID so children cannot abuse handshake */
static struct qib_diag_client {
	struct qib_diag_client *next;
	struct qib_devdata *dd;
	pid_t pid;
	enum diag_state state;
} *client_pool;

/*
 * Get a client struct. Recycled if possible, else kmalloc.
 * Must be called with qib_mutex held
 */
static struct qib_diag_client *get_client(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	dc = client_pool;
	if (dc)
		/* got one from pool; remove it and use */
		client_pool = dc->next;
	else
		/* None in pool, alloc and init */
		dc = kmalloc(sizeof *dc, GFP_KERNEL);

	if (dc) {
		dc->next = NULL;
		dc->dd = dd;
		dc->pid = current->pid;
		dc->state = OPENED;
	}
	return dc;
}

/*
 * Return to pool. Must be called with qib_mutex held
 */
static void return_client(struct qib_diag_client *dc)
{
	struct qib_devdata *dd = dc->dd;
	struct qib_diag_client *tdc, *rdc;

	rdc = NULL;
	if (dc == dd->diag_client) {
		dd->diag_client = dc->next;
		rdc = dc;
	} else {
		tdc = dc->dd->diag_client;
		while (tdc) {
			if (dc == tdc->next) {
				tdc->next = dc->next;
				rdc = dc;
				break;
			}
			tdc = tdc->next;
		}
	}
	if (rdc) {
		rdc->state = UNUSED;
		rdc->dd = NULL;
		rdc->pid = 0;
		rdc->next = client_pool;
		client_pool = rdc;
	}
}

static int qib_diag_open(struct inode *in, struct file *fp);
static int qib_diag_release(struct inode *in, struct file *fp);
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off);
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off);

static const struct file_operations diag_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diag_write,
	.read = qib_diag_read,
	.open = qib_diag_open,
	.release = qib_diag_release,
	.llseek = default_llseek,
};

static atomic_t diagpkt_count = ATOMIC_INIT(0);
static struct cdev *diagpkt_cdev;
static struct device *diagpkt_device;

static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
				 size_t count, loff_t *off);

static const struct file_operations diagpkt_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diagpkt_write,
	.llseek = noop_llseek,
};

int qib_diag_add(struct qib_devdata *dd)
{
	char name[16];
	int ret = 0;

	if (atomic_inc_return(&diagpkt_count) == 1) {
		ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
				    &diagpkt_file_ops, &diagpkt_cdev,
				    &diagpkt_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
	ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
			    &diag_file_ops, &dd->diag_cdev,
			    &dd->diag_device);
done:
	return ret;
}

static void qib_unregister_observers(struct qib_devdata *dd);

void qib_diag_remove(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	if (atomic_dec_and_test(&diagpkt_count))
		qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);

	qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);

	/*
	 * Return all diag_clients of this device. There should be none,
	 * as we are "guaranteed" that no clients are still open
	 */
	while (dd->diag_client)
		return_client(dd->diag_client);

	/* Now clean up all unused client structs */
	while (client_pool) {
		dc = client_pool;
		client_pool = dc->next;
		kfree(dc);
	}
	/* Clean up observer list */
	qib_unregister_observers(dd);
}

/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
 *
 * @dd: the qlogic_ib device
 * @offs: the offset in chip-space
 * @cntp: Pointer to max (byte) count for transfer starting at offset
 * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
 * mapping. It is needed because with the use of PAT for control of
 * write-combining, the logically contiguous address-space of the chip
 * may be split into virtually non-contiguous spaces, with different
 * attributes, which are then mapped to contiguous physical space
 * based from the first BAR.
 *
 * The code below makes the same assumptions as were made in
 * init_chip_wc_pat() (qib_init.c), copied here:
 * Assumes chip address space looks like:
 *		- kregs + sregs + cregs + uregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *	or:
 *		- kregs + sregs + cregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *		- uregs
 *
 * If cntp is non-NULL, returns how many bytes from offset can be accessed
 * Returns 0 if the offset is not mapped.
 */
static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
				       u32 *cntp)
{
	u32 kreglen;
	u32 snd_bottom, snd_lim = 0;
	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
	u32 __iomem *map = NULL;
	u32 cnt = 0;
	u32 tot4k, offs4k;

	/* First, simplest case, offset is within the first map. */
	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
	if (offset < kreglen) {
		map = krb32 + (offset / sizeof(u32));
		cnt = kreglen - offset;
		goto mapped;
	}

	/*
	 * Next check for user regs, the next most common case,
	 * and a cheap check because if they are not in the first map
	 * they are last in chip.
	 */
	if (dd->userbase) {
		/* If user regs mapped, they are after send, so set limit. */
		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
		if (!dd->piovl15base)
			snd_lim = dd->uregbase;
		krb32 = (u32 __iomem *)dd->userbase;
		if (offset >= dd->uregbase && offset < ulim) {
			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
			cnt = ulim - offset;
			goto mapped;
		}
	}

	/*
	 * Lastly, check for offset within Send Buffers.
	 * This is gnarly because struct devdata is deliberately vague
	 * about things like 7322 VL15 buffers, and we are not in
	 * chip-specific code here, so should not make many assumptions.
	 * The one we _do_ make is that the only chip that has more sndbufs
	 * than we admit is the 7322, and it has userregs above that, so
	 * we know the snd_lim.
	 */
	/* Assume 2K buffers are first. */
	snd_bottom = dd->pio2k_bufbase;
	if (snd_lim == 0) {
		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
		snd_lim = snd_bottom + tot2k;
	}
	/* If 4k buffers exist, account for them by bumping
	 * appropriate limit.
	 */
	tot4k = dd->piobcnt4k * dd->align4k;
	offs4k = dd->piobufbase >> 32;
	if (dd->piobcnt4k) {
		if (snd_bottom > offs4k)
			snd_bottom = offs4k;
		else {
			/* 4k above 2k. Bump snd_lim, if needed */
			if (!dd->userbase || dd->piovl15base)
				snd_lim = offs4k + tot4k;
		}
	}
	/*
	 * Judgement call: can we ignore the space between SendBuffs and
	 * UserRegs, where we would like to see vl15 buffs, but not more?
	 */
	if (offset >= snd_bottom && offset < snd_lim) {
		offset -= snd_bottom;
		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
		cnt = snd_lim - offset;
	}

	if (!map && offs4k && dd->piovl15base) {
		snd_lim = offs4k + tot4k + 2 * dd->align4k;
		if (offset >= (offs4k + tot4k) && offset < snd_lim) {
			map = (u32 __iomem *)dd->piovl15base +
				((offset - (offs4k + tot4k)) / sizeof(u32));
			cnt = snd_lim - offset;
		}
	}

mapped:
	if (cntp)
		*cntp = cnt;
	return map;
}

/*
 * qib_read_umem64 - read a 64-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy (multiple of 32 bits)
 *
 * This function also localizes all chip memory accesses.
 * The copy should be written such that we read full cacheline packets
 * from the chip. This is usually used for a single qword
 *
 * NOTE: This assumes the chip address is 64-bit aligned.
 */
static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data = readq(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(u64))) {
			ret = -EFAULT;
			goto bail;
		}
		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem64 - write a 64-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: the number of bytes to copy (multiple of 32 bits)
 *
 * This is usually used for a single qword
 * NOTE: This assumes the chip address is 64-bit aligned.
 */

static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data;
		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writeq(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_read_umem32 - read a 32-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy
 *
 * read 32 bit values, not 64 bit; for memories that only
 * support 32 bit reads; usually a single dword.
 */
static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u32 data = readl(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem32 - write a 32-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: number of bytes to copy
 *
 * write 32 bit values, not 64 bit; for memories that only
 * support 32 bit write; usually a single dword.
 */

static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	while (reg_addr < reg_end) {
		u32 data;

		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writel(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}

static int qib_diag_open(struct inode *in, struct file *fp)
{
	int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
	struct qib_devdata *dd;
	struct qib_diag_client *dc;
	int ret;

	mutex_lock(&qib_mutex);

	dd = qib_lookup(unit);

	if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
	    !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}

	dc = get_client(dd);
	if (!dc) {
		ret = -ENOMEM;
		goto bail;
	}
	dc->next = dd->diag_client;
	dd->diag_client = dc;
	fp->private_data = dc;
	ret = 0;
bail:
	mutex_unlock(&qib_mutex);

	return ret;
}

/**
 * qib_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: qib_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t qib_diagpkt_write(struct file *fp,
				 const char __user *data,
				 size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, clen, pbufn;
	struct qib_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	ssize_t ret = 0;

	if (count != sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}
	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	dd = qib_lookup(dp.unit);
	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}
	if (!(dd->flags & QIB_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -ENODEV;
		goto bail;
	}

	if (dp.version != _DIAG_XPKT_VERS) {
		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			    dp.version);
		ret = -EINVAL;
		goto bail;
	}
	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}
	if (!dp.port || dp.port > dd->num_pports) {
		ret = -EINVAL;
		goto bail;
	}
	ppd = &dd->pport[dp.port - 1];

	/* need total length before first word written */
	/* +1 word is for the qword padding */
	plen = sizeof(u32) + dp.len;
	clen = dp.len >> 2;

	if ((plen + 4) > ppd->ibmaxlen) {
		ret = -EINVAL;
		goto bail;	/* before writing pbc */
	}
	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
			    "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;

	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
	if (!piobuf) {
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just to be extra sure */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));

	/* disable header check on pbufn for this packet */
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);

	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before trigger word, then write trigger word, then
	 * flush again, so packet is sent.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
		qib_flush_wc();
		__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
	} else
		qib_pio_copy(piobuf + 2, tmpbuf, clen);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	/*
	 * Ensure buffer is written to the chip, then re-enable
	 * header checks (if supported by chip). The txchk
	 * code will ensure seen by chip before returning.
	 */
	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
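
/*
 * Illustrative sketch (not part of this driver): from userspace, a single
 * write() of a struct qib_diag_xpkt to the diagpkt device describes the
 * packet to send; the payload itself is fetched from the user address in
 * the .data field. The device node path and the exact field layout of
 * struct qib_diag_xpkt (defined in qib_common.h) are assumptions here,
 * inferred only from the usage in qib_diagpkt_write() above.
 *
 *	struct qib_diag_xpkt dp = { 0 };
 *	uint32_t payload[16];			// dword-multiple payload
 *
 *	dp.version = _DIAG_XPKT_VERS;
 *	dp.unit = 0;				// which HCA
 *	dp.port = 1;				// ports are 1-based here
 *	dp.len = sizeof(payload);		// must be a multiple of 4
 *	dp.data = (uintptr_t)payload;		// user address of payload
 *	dp.pbc_wd = 0;				// 0: driver computes PBC word
 *
 *	int fd = open("/dev/ipath_diagpkt", O_WRONLY);	// assumed node name
 *	if (fd >= 0 && write(fd, &dp, sizeof(dp)) == sizeof(dp))
 *		;	// packet was handed to a send buffer
 */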

static int qib_diag_release(struct inode *in, struct file *fp)
{
	mutex_lock(&qib_mutex);
	return_client(fp->private_data);
	fp->private_data = NULL;
	mutex_unlock(&qib_mutex);
	return 0;
}

/*
 * Chip-specific code calls to register its interest in
 * a specific range.
 */
struct diag_observer_list_elt {
	struct diag_observer_list_elt *next;
	const struct diag_observer *op;
};

int qib_register_observer(struct qib_devdata *dd,
			  const struct diag_observer *op)
{
	struct diag_observer_list_elt *olp;
	int ret = -EINVAL;

	if (!dd || !op)
		goto bail;
	ret = -ENOMEM;
	olp = vmalloc(sizeof *olp);
	if (!olp) {
		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
		goto bail;
	}
	if (olp) {
		unsigned long flags;

		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		olp->op = op;
		olp->next = dd->diag_observer_list;
		dd->diag_observer_list = olp;
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		ret = 0;
	}
bail:
	return ret;
}
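
/*
 * Illustrative sketch (not part of this driver): chip-specific code would
 * register an observer so that diag accesses falling within [bottom, top]
 * are routed through its hook, under qib_diag_trans_lock, instead of raw
 * MMIO. The exact prototype of the hook and the field layout of struct
 * diag_observer live in qib.h; the names and the register offset below are
 * inferred from the usage in this file and are assumptions for illustration.
 *
 *	static const struct diag_observer sendctrl_0_observer = {
 *		.hook	= sendctrl_hook,		// chip-specific callback
 *		.bottom	= SENDCTRL_OFFSET,		// hypothetical register offset
 *		.top	= SENDCTRL_OFFSET + 8 - 1,	// one 64-bit register
 *	};
 *
 *	// typically called once per device from chip-specific init code
 *	qib_register_observer(dd, &sendctrl_0_observer);
 */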

/* Remove all registered observers when device is closed */
static void qib_unregister_observers(struct qib_devdata *dd)
{
	struct diag_observer_list_elt *olp;
	unsigned long flags;

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp = dd->diag_observer_list;
	while (olp) {
		/* Pop one observer, let go of lock */
		dd->diag_observer_list = olp->next;
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		vfree(olp);
		/* try again. */
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		olp = dd->diag_observer_list;
	}
	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
}

/*
 * Find the observer, if any, for the specified address. Initial implementation
 * is simple stack of observers. This must be called with diag transaction
 * lock held.
 */
static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
						     u32 addr)
{
	struct diag_observer_list_elt *olp;
	const struct diag_observer *op = NULL;

	olp = dd->diag_observer_list;
	while (olp) {
		op = olp->op;
		if (addr >= op->bottom && addr <= op->top)
			break;
		olp = olp->next;
	}
	if (!olp)
		op = NULL;

	return op;
}

static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	void __iomem *kreg_base;
	ssize_t ret;

	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	kreg_base = dd->kregbase;

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY && (*off || count != 8))
		ret = -EINVAL; /* prevent cat /dev/qib_diag* */
	else {
		unsigned long flags;
		u64 data64 = 0;
		int use_32;
		const struct diag_observer *op;

		use_32 = (count % 8) || (*off % 8);
		ret = -1;
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		/*
		 * Check for observer on this address range.
		 * we only support a single 32 or 64-bit read
		 * via observer, currently.
		 */
		op = diag_get_observer(dd, *off);
		if (op) {
			u32 offset = *off;
			ret = op->hook(dd, op, offset, &data64, 0, use_32);
		}
		/*
		 * We need to release lock before any copy_to_user(),
		 * whether implicit in qib_read_umem* or explicit below.
		 */
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit rd
				 */
				ret = qib_read_umem32(dd, data, (u32) *off,
						      count);
			else
				ret = qib_read_umem64(dd, data, (u32) *off,
						      count);
		} else if (ret == count) {
			/* Below finishes case where observer existed */
			ret = copy_to_user(data, &data64, use_32 ?
					   sizeof(u32) : sizeof(u64));
			if (ret)
				ret = -EFAULT;
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == OPENED)
			dc->state = INIT;
	}
bail:
	return ret;
}

static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	void __iomem *kreg_base;
	ssize_t ret;

	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	kreg_base = dd->kregbase;

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY &&
		 ((*off || count != 8) || dc->state != INIT))
		/* No writes except second-step of init seq */
		ret = -EINVAL; /* before any other write allowed */
	else {
		unsigned long flags;
		const struct diag_observer *op = NULL;
		int use_32 = (count % 8) || (*off % 8);

		/*
		 * Check for observer on this address range.
		 * We only support a single 32 or 64-bit write
		 * via observer, currently. This helps, because
		 * we would otherwise have to jump through hoops
		 * to make "diag transaction" meaningful when we
		 * cannot do a copy_from_user while holding the lock.
		 */
		if (count == 4 || count == 8) {
			u64 data64;
			u32 offset = *off;
			ret = copy_from_user(&data64, data, count);
			if (ret) {
				ret = -EFAULT;
				goto bail;
			}
			spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
			op = diag_get_observer(dd, *off);
			if (op)
				ret = op->hook(dd, op, offset, &data64, ~0Ull,
					       use_32);
			spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		}

		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit write
				 */
				ret = qib_write_umem32(dd, (u32) *off, data,
						       count);
			else
				ret = qib_write_umem64(dd, (u32) *off, data,
						       count);
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == INIT)
			dc->state = READY; /* all read/write OK now */
	}
bail:
	return ret;
}